drivers/net/txgbe/txgbe_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020
3  */
4
5 #include <sys/queue.h>
6 #include <rte_bus_pci.h>
7 #include <rte_malloc.h>
8 #include <rte_flow.h>
9 #include <rte_flow_driver.h>
10
11 #include "txgbe_ethdev.h"
12
13 #define TXGBE_MIN_N_TUPLE_PRIO 1
14 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 #define TXGBE_MAX_FLX_SOURCE_OFF 62
16
17 /* ntuple filter list structure */
18 struct txgbe_ntuple_filter_ele {
19         TAILQ_ENTRY(txgbe_ntuple_filter_ele) entries;
20         struct rte_eth_ntuple_filter filter_info;
21 };
22 /* ethertype filter list structure */
23 struct txgbe_ethertype_filter_ele {
24         TAILQ_ENTRY(txgbe_ethertype_filter_ele) entries;
25         struct rte_eth_ethertype_filter filter_info;
26 };
27 /* syn filter list structure */
28 struct txgbe_eth_syn_filter_ele {
29         TAILQ_ENTRY(txgbe_eth_syn_filter_ele) entries;
30         struct rte_eth_syn_filter filter_info;
31 };
32 /* fdir filter list structure */
33 struct txgbe_fdir_rule_ele {
34         TAILQ_ENTRY(txgbe_fdir_rule_ele) entries;
35         struct txgbe_fdir_rule filter_info;
36 };
37 /* l2_tunnel filter list structure */
38 struct txgbe_eth_l2_tunnel_conf_ele {
39         TAILQ_ENTRY(txgbe_eth_l2_tunnel_conf_ele) entries;
40         struct txgbe_l2_tunnel_conf filter_info;
41 };
42 /* rss filter list structure */
43 struct txgbe_rss_conf_ele {
44         TAILQ_ENTRY(txgbe_rss_conf_ele) entries;
45         struct txgbe_rte_flow_rss_conf filter_info;
46 };
47 /* txgbe_flow memory list structure */
48 struct txgbe_flow_mem {
49         TAILQ_ENTRY(txgbe_flow_mem) entries;
50         struct rte_flow *flow;
51 };
52
53 TAILQ_HEAD(txgbe_ntuple_filter_list, txgbe_ntuple_filter_ele);
54 TAILQ_HEAD(txgbe_ethertype_filter_list, txgbe_ethertype_filter_ele);
55 TAILQ_HEAD(txgbe_syn_filter_list, txgbe_eth_syn_filter_ele);
56 TAILQ_HEAD(txgbe_fdir_rule_filter_list, txgbe_fdir_rule_ele);
57 TAILQ_HEAD(txgbe_l2_tunnel_filter_list, txgbe_eth_l2_tunnel_conf_ele);
58 TAILQ_HEAD(txgbe_rss_filter_list, txgbe_rss_conf_ele);
59 TAILQ_HEAD(txgbe_flow_mem_list, txgbe_flow_mem);
60
61 static struct txgbe_ntuple_filter_list filter_ntuple_list;
62 static struct txgbe_ethertype_filter_list filter_ethertype_list;
63 static struct txgbe_syn_filter_list filter_syn_list;
64 static struct txgbe_fdir_rule_filter_list filter_fdir_list;
65 static struct txgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
66 static struct txgbe_rss_filter_list filter_rss_list;
67 static struct txgbe_flow_mem_list txgbe_flow_list;
68
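/*
 * Illustrative sketch (assumption, not taken from this section): the flow
 * creation path elsewhere in the driver allocates one of the list elements
 * above and links it into the matching list roughly as follows. The local
 * variable names are made up for the example.
 *
 *	struct txgbe_ntuple_filter_ele *ele;
 *
 *	ele = rte_zmalloc("txgbe_ntuple_filter", sizeof(*ele), 0);
 *	if (ele != NULL) {
 *		rte_memcpy(&ele->filter_info, &ntuple_filter,
 *			   sizeof(struct rte_eth_ntuple_filter));
 *		TAILQ_INSERT_TAIL(&filter_ntuple_list, ele, entries);
 *	}
 */
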
69 /**
70  * An endless loop cannot happen, given the assumptions below:
71  * 1. there is at least one non-void item (END).
72  * 2. cur is before END.
73  */
74 static inline
75 const struct rte_flow_item *next_no_void_pattern(
76                 const struct rte_flow_item pattern[],
77                 const struct rte_flow_item *cur)
78 {
79         const struct rte_flow_item *next =
80                 cur ? cur + 1 : &pattern[0];
81         while (1) {
82                 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
83                         return next;
84                 next++;
85         }
86 }
87
88 static inline
89 const struct rte_flow_action *next_no_void_action(
90                 const struct rte_flow_action actions[],
91                 const struct rte_flow_action *cur)
92 {
93         const struct rte_flow_action *next =
94                 cur ? cur + 1 : &actions[0];
95         while (1) {
96                 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
97                         return next;
98                 next++;
99         }
100 }
101
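/*
 * Illustrative example (assumption, not part of the driver): with a pattern
 * like the one below, next_no_void_pattern() simply steps over the VOID
 * entries, so the parsers only ever see the ETH, IPV4, UDP and END items.
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 *	const struct rte_flow_item *it;
 *
 *	it = next_no_void_pattern(pattern, NULL);	// ETH
 *	it = next_no_void_pattern(pattern, it);		// IPV4 (VOID skipped)
 */
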
102 /**
103  * Please be aware of an assumption shared by all the parsers:
104  * rte_flow_item uses big endian, while rte_flow_attr and
105  * rte_flow_action use CPU (host) order.
106  * Because the pattern is used to describe packets,
107  * the packets normally use network byte order.
108  */
109
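/*
 * Illustrative example (assumption, not part of the driver): item spec/mask
 * fields are therefore given in network byte order, while attribute and
 * action fields stay in CPU order, e.g.:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = rte_cpu_to_be_16(80) },	// big endian
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };	// CPU order
 *	struct rte_flow_action_queue queue = { .index = 3 };		// CPU order
 */
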
110 /**
111  * Parse the rule to see if it is an n-tuple rule,
112  * and get the n-tuple filter info along the way.
113  * pattern:
114  * The first not void item can be ETH or IPV4.
115  * The second not void item must be IPV4 if the first one is ETH.
116  * The third not void item must be UDP, TCP or SCTP.
117  * The next not void item must be END.
118  * action:
119  * The first not void action should be QUEUE.
120  * The next not void action should be END.
121  * pattern example:
122  * ITEM         Spec                    Mask
123  * ETH          NULL                    NULL
124  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
125  *              dst_addr 192.167.3.50   0xFFFFFFFF
126  *              next_proto_id   17      0xFF
127  * UDP/TCP/     src_port        80      0xFFFF
128  * SCTP         dst_port        80      0xFFFF
129  * END
130  * other members in mask and spec should be set to 0x00.
131  * item->last should be NULL.
132  *
133  * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
134  *
135  */
136 static int
137 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
138                          const struct rte_flow_item pattern[],
139                          const struct rte_flow_action actions[],
140                          struct rte_eth_ntuple_filter *filter,
141                          struct rte_flow_error *error)
142 {
143         const struct rte_flow_item *item;
144         const struct rte_flow_action *act;
145         const struct rte_flow_item_ipv4 *ipv4_spec;
146         const struct rte_flow_item_ipv4 *ipv4_mask;
147         const struct rte_flow_item_tcp *tcp_spec;
148         const struct rte_flow_item_tcp *tcp_mask;
149         const struct rte_flow_item_udp *udp_spec;
150         const struct rte_flow_item_udp *udp_mask;
151         const struct rte_flow_item_sctp *sctp_spec;
152         const struct rte_flow_item_sctp *sctp_mask;
153         const struct rte_flow_item_eth *eth_spec;
154         const struct rte_flow_item_eth *eth_mask;
155         const struct rte_flow_item_vlan *vlan_spec;
156         const struct rte_flow_item_vlan *vlan_mask;
157         struct rte_flow_item_eth eth_null;
158         struct rte_flow_item_vlan vlan_null;
159
160         if (!pattern) {
161                 rte_flow_error_set(error,
162                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
163                         NULL, "NULL pattern.");
164                 return -rte_errno;
165         }
166
167         if (!actions) {
168                 rte_flow_error_set(error, EINVAL,
169                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
170                                    NULL, "NULL action.");
171                 return -rte_errno;
172         }
173         if (!attr) {
174                 rte_flow_error_set(error, EINVAL,
175                                    RTE_FLOW_ERROR_TYPE_ATTR,
176                                    NULL, "NULL attribute.");
177                 return -rte_errno;
178         }
179
180         memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
181         memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
182
183 #ifdef RTE_LIB_SECURITY
184         /**
185          *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
186          */
187         act = next_no_void_action(actions, NULL);
188         if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
189                 const void *conf = act->conf;
190                 /* check if the next not void item is END */
191                 act = next_no_void_action(actions, act);
192                 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
193                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
194                         rte_flow_error_set(error, EINVAL,
195                                 RTE_FLOW_ERROR_TYPE_ACTION,
196                                 act, "Not supported action.");
197                         return -rte_errno;
198                 }
199
200                 /* get the IP pattern */
201                 item = next_no_void_pattern(pattern, NULL);
202                 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
203                                 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
204                         if (item->last ||
205                                         item->type == RTE_FLOW_ITEM_TYPE_END) {
206                                 rte_flow_error_set(error, EINVAL,
207                                         RTE_FLOW_ERROR_TYPE_ITEM,
208                                         item, "IP pattern missing.");
209                                 return -rte_errno;
210                         }
211                         item = next_no_void_pattern(pattern, item);
212                 }
213
214                 filter->proto = IPPROTO_ESP;
215                 return txgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
216                                         item->type == RTE_FLOW_ITEM_TYPE_IPV6);
217         }
218 #endif
219
220         /* the first not void item can be MAC or IPv4 */
221         item = next_no_void_pattern(pattern, NULL);
222
223         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
224             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
225                 rte_flow_error_set(error, EINVAL,
226                         RTE_FLOW_ERROR_TYPE_ITEM,
227                         item, "Not supported by ntuple filter");
228                 return -rte_errno;
229         }
230         /* Skip Ethernet */
231         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
232                 eth_spec = item->spec;
233                 eth_mask = item->mask;
234                 /* Not supported last point for range */
235                 if (item->last) {
236                         rte_flow_error_set(error,
237                           EINVAL,
238                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
239                           item, "Not supported last point for range");
240                         return -rte_errno;
241                 }
242                 /* if the first item is MAC, the content should be NULL */
243                 if ((item->spec || item->mask) &&
244                         (memcmp(eth_spec, &eth_null,
245                                 sizeof(struct rte_flow_item_eth)) ||
246                          memcmp(eth_mask, &eth_null,
247                                 sizeof(struct rte_flow_item_eth)))) {
248                         rte_flow_error_set(error, EINVAL,
249                                 RTE_FLOW_ERROR_TYPE_ITEM,
250                                 item, "Not supported by ntuple filter");
251                         return -rte_errno;
252                 }
253                 /* check if the next not void item is IPv4 or Vlan */
254                 item = next_no_void_pattern(pattern, item);
255                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
256                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
257                         rte_flow_error_set(error,
258                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
259                                 item, "Not supported by ntuple filter");
260                         return -rte_errno;
261                 }
262         }
263
264         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
265                 vlan_spec = item->spec;
266                 vlan_mask = item->mask;
267                 /* Not supported last point for range */
268                 if (item->last) {
269                         rte_flow_error_set(error,
270                                 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
271                                 item, "Not supported last point for range");
272                         return -rte_errno;
273                 }
274                 /* the content should be NULL */
275                 if ((item->spec || item->mask) &&
276                         (memcmp(vlan_spec, &vlan_null,
277                                 sizeof(struct rte_flow_item_vlan)) ||
278                          memcmp(vlan_mask, &vlan_null,
279                                 sizeof(struct rte_flow_item_vlan)))) {
280                         rte_flow_error_set(error, EINVAL,
281                                 RTE_FLOW_ERROR_TYPE_ITEM,
282                                 item, "Not supported by ntuple filter");
283                         return -rte_errno;
284                 }
285                 /* check if the next not void item is IPv4 */
286                 item = next_no_void_pattern(pattern, item);
287                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
288                         rte_flow_error_set(error,
289                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
290                           item, "Not supported by ntuple filter");
291                         return -rte_errno;
292                 }
293         }
294
295         if (item->mask) {
296                 /* get the IPv4 info */
297                 if (!item->spec || !item->mask) {
298                         rte_flow_error_set(error, EINVAL,
299                                 RTE_FLOW_ERROR_TYPE_ITEM,
300                                 item, "Invalid ntuple mask");
301                         return -rte_errno;
302                 }
303                 /* Not supported last point for range */
304                 if (item->last) {
305                         rte_flow_error_set(error, EINVAL,
306                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
307                                 item, "Not supported last point for range");
308                         return -rte_errno;
309                 }
310
311                 ipv4_mask = item->mask;
312                 /**
313                  * Only support src & dst addresses, protocol,
314                  * others should be masked.
315                  */
316                 if (ipv4_mask->hdr.version_ihl ||
317                     ipv4_mask->hdr.type_of_service ||
318                     ipv4_mask->hdr.total_length ||
319                     ipv4_mask->hdr.packet_id ||
320                     ipv4_mask->hdr.fragment_offset ||
321                     ipv4_mask->hdr.time_to_live ||
322                     ipv4_mask->hdr.hdr_checksum) {
323                         rte_flow_error_set(error,
324                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
325                                 item, "Not supported by ntuple filter");
326                         return -rte_errno;
327                 }
328                 if ((ipv4_mask->hdr.src_addr != 0 &&
329                         ipv4_mask->hdr.src_addr != UINT32_MAX) ||
330                         (ipv4_mask->hdr.dst_addr != 0 &&
331                         ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
332                         (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
333                         ipv4_mask->hdr.next_proto_id != 0)) {
334                         rte_flow_error_set(error,
335                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
336                                 item, "Not supported by ntuple filter");
337                         return -rte_errno;
338                 }
339
340                 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
341                 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
342                 filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
343
344                 ipv4_spec = item->spec;
345                 filter->dst_ip = ipv4_spec->hdr.dst_addr;
346                 filter->src_ip = ipv4_spec->hdr.src_addr;
347                 filter->proto  = ipv4_spec->hdr.next_proto_id;
348         }
349
350         /* check if the next not void item is TCP or UDP */
351         item = next_no_void_pattern(pattern, item);
352         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
353             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
354             item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
355             item->type != RTE_FLOW_ITEM_TYPE_END) {
356                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
357                 rte_flow_error_set(error, EINVAL,
358                         RTE_FLOW_ERROR_TYPE_ITEM,
359                         item, "Not supported by ntuple filter");
360                 return -rte_errno;
361         }
362
363         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
364                 (!item->spec && !item->mask)) {
365                 goto action;
366         }
367
368         /* get the TCP/UDP/SCTP info */
369         if (item->type != RTE_FLOW_ITEM_TYPE_END &&
370                 (!item->spec || !item->mask)) {
371                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
372                 rte_flow_error_set(error, EINVAL,
373                         RTE_FLOW_ERROR_TYPE_ITEM,
374                         item, "Invalid ntuple mask");
375                 return -rte_errno;
376         }
377
378         /* Not supported last point for range */
379         if (item->last) {
380                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381                 rte_flow_error_set(error, EINVAL,
382                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
383                         item, "Not supported last point for range");
384                 return -rte_errno;
385         }
386
387         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
388                 tcp_mask = item->mask;
389
390                 /**
391                  * Only support src & dst ports, tcp flags,
392                  * others should be masked.
393                  */
394                 if (tcp_mask->hdr.sent_seq ||
395                     tcp_mask->hdr.recv_ack ||
396                     tcp_mask->hdr.data_off ||
397                     tcp_mask->hdr.rx_win ||
398                     tcp_mask->hdr.cksum ||
399                     tcp_mask->hdr.tcp_urp) {
400                         memset(filter, 0,
401                                 sizeof(struct rte_eth_ntuple_filter));
402                         rte_flow_error_set(error, EINVAL,
403                                 RTE_FLOW_ERROR_TYPE_ITEM,
404                                 item, "Not supported by ntuple filter");
405                         return -rte_errno;
406                 }
407                 if ((tcp_mask->hdr.src_port != 0 &&
408                         tcp_mask->hdr.src_port != UINT16_MAX) ||
409                         (tcp_mask->hdr.dst_port != 0 &&
410                         tcp_mask->hdr.dst_port != UINT16_MAX)) {
411                         rte_flow_error_set(error,
412                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
413                                 item, "Not supported by ntuple filter");
414                         return -rte_errno;
415                 }
416
417                 filter->dst_port_mask  = tcp_mask->hdr.dst_port;
418                 filter->src_port_mask  = tcp_mask->hdr.src_port;
419                 if (tcp_mask->hdr.tcp_flags == 0xFF) {
420                         filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
421                 } else if (!tcp_mask->hdr.tcp_flags) {
422                         filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
423                 } else {
424                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
425                         rte_flow_error_set(error, EINVAL,
426                                 RTE_FLOW_ERROR_TYPE_ITEM,
427                                 item, "Not supported by ntuple filter");
428                         return -rte_errno;
429                 }
430
431                 tcp_spec = item->spec;
432                 filter->dst_port  = tcp_spec->hdr.dst_port;
433                 filter->src_port  = tcp_spec->hdr.src_port;
434                 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
435         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
436                 udp_mask = item->mask;
437
438                 /**
439                  * Only support src & dst ports,
440                  * others should be masked.
441                  */
442                 if (udp_mask->hdr.dgram_len ||
443                     udp_mask->hdr.dgram_cksum) {
444                         memset(filter, 0,
445                                 sizeof(struct rte_eth_ntuple_filter));
446                         rte_flow_error_set(error, EINVAL,
447                                 RTE_FLOW_ERROR_TYPE_ITEM,
448                                 item, "Not supported by ntuple filter");
449                         return -rte_errno;
450                 }
451                 if ((udp_mask->hdr.src_port != 0 &&
452                         udp_mask->hdr.src_port != UINT16_MAX) ||
453                         (udp_mask->hdr.dst_port != 0 &&
454                         udp_mask->hdr.dst_port != UINT16_MAX)) {
455                         rte_flow_error_set(error,
456                                 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
457                                 item, "Not supported by ntuple filter");
458                         return -rte_errno;
459                 }
460
461                 filter->dst_port_mask = udp_mask->hdr.dst_port;
462                 filter->src_port_mask = udp_mask->hdr.src_port;
463
464                 udp_spec = item->spec;
465                 filter->dst_port = udp_spec->hdr.dst_port;
466                 filter->src_port = udp_spec->hdr.src_port;
467         } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
468                 sctp_mask = item->mask;
469
470                 /**
471                  * Only support src & dst ports,
472                  * others should be masked.
473                  */
474                 if (sctp_mask->hdr.tag ||
475                     sctp_mask->hdr.cksum) {
476                         memset(filter, 0,
477                                 sizeof(struct rte_eth_ntuple_filter));
478                         rte_flow_error_set(error, EINVAL,
479                                 RTE_FLOW_ERROR_TYPE_ITEM,
480                                 item, "Not supported by ntuple filter");
481                         return -rte_errno;
482                 }
483
484                 filter->dst_port_mask = sctp_mask->hdr.dst_port;
485                 filter->src_port_mask = sctp_mask->hdr.src_port;
486
487                 sctp_spec = item->spec;
488                 filter->dst_port = sctp_spec->hdr.dst_port;
489                 filter->src_port = sctp_spec->hdr.src_port;
490         } else {
491                 goto action;
492         }
493
494         /* check if the next not void item is END */
495         item = next_no_void_pattern(pattern, item);
496         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
497                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498                 rte_flow_error_set(error, EINVAL,
499                         RTE_FLOW_ERROR_TYPE_ITEM,
500                         item, "Not supported by ntuple filter");
501                 return -rte_errno;
502         }
503
504 action:
505
506         /**
507          * n-tuple only supports forwarding,
508          * check if the first not void action is QUEUE.
509          */
510         act = next_no_void_action(actions, NULL);
511         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
512                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
513                 rte_flow_error_set(error, EINVAL,
514                         RTE_FLOW_ERROR_TYPE_ACTION,
515                         act, "Not supported action.");
516                 return -rte_errno;
517         }
518         filter->queue =
519                 ((const struct rte_flow_action_queue *)act->conf)->index;
520
521         /* check if the next not void item is END */
522         act = next_no_void_action(actions, act);
523         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
524                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
525                 rte_flow_error_set(error, EINVAL,
526                         RTE_FLOW_ERROR_TYPE_ACTION,
527                         act, "Not supported action.");
528                 return -rte_errno;
529         }
530
531         /* parse attr */
532         /* must be input direction */
533         if (!attr->ingress) {
534                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
535                 rte_flow_error_set(error, EINVAL,
536                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
537                                    attr, "Only support ingress.");
538                 return -rte_errno;
539         }
540
541         /* not supported */
542         if (attr->egress) {
543                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
544                 rte_flow_error_set(error, EINVAL,
545                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
546                                    attr, "Not support egress.");
547                 return -rte_errno;
548         }
549
550         /* not supported */
551         if (attr->transfer) {
552                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
553                 rte_flow_error_set(error, EINVAL,
554                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
555                                    attr, "No support for transfer.");
556                 return -rte_errno;
557         }
558
559         if (attr->priority > 0xFFFF) {
560                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
561                 rte_flow_error_set(error, EINVAL,
562                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
563                                    attr, "Error priority.");
564                 return -rte_errno;
565         }
566         filter->priority = (uint16_t)attr->priority;
567         if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
568                 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
569                 filter->priority = 1;
570
571         return 0;
572 }
573
574 /* a txgbe-specific function because only certain ntuple flags are supported */
575 static int
576 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
577                           const struct rte_flow_attr *attr,
578                           const struct rte_flow_item pattern[],
579                           const struct rte_flow_action actions[],
580                           struct rte_eth_ntuple_filter *filter,
581                           struct rte_flow_error *error)
582 {
583         int ret;
584
585         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
586
587         if (ret)
588                 return ret;
589
590 #ifdef RTE_LIB_SECURITY
591         /* an ESP flow is not really an ntuple flow */
592         if (filter->proto == IPPROTO_ESP)
593                 return 0;
594 #endif
595
596         /* txgbe doesn't support tcp flags */
597         if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
598                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
599                 rte_flow_error_set(error, EINVAL,
600                                    RTE_FLOW_ERROR_TYPE_ITEM,
601                                    NULL, "Not supported by ntuple filter");
602                 return -rte_errno;
603         }
604
605         /* txgbe only supports priorities 1 to 7 */
606         if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
607             filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
608                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
609                 rte_flow_error_set(error, EINVAL,
610                         RTE_FLOW_ERROR_TYPE_ITEM,
611                         NULL, "Priority not supported by ntuple filter");
612                 return -rte_errno;
613         }
614
615         if (filter->queue >= dev->data->nb_rx_queues) {
616                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
617                 rte_flow_error_set(error, EINVAL,
618                                    RTE_FLOW_ERROR_TYPE_ITEM,
619                                    NULL, "Not supported by ntuple filter");
620                 return -rte_errno;
621         }
622
623         /* fixed value for txgbe */
624         filter->flags = RTE_5TUPLE_FLAGS;
625         return 0;
626 }
627
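/*
 * Illustrative sketch (assumption, not part of the driver): how an
 * application could build an rte_flow rule matching the n-tuple layout
 * accepted above. Port id 0, queue index 3, the addresses and ports are
 * made up for the example. Note that the ETH item carries no spec/mask,
 * as the parser requires.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = UINT8_MAX,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = UINT16_MAX,
 *			.dst_port = UINT16_MAX,
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */
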
628 /**
629  * Parse the rule to see if it is an ethertype rule,
630  * and get the ethertype filter info along the way.
631  * pattern:
632  * The first not void item can be ETH.
633  * The next not void item must be END.
634  * action:
635  * The first not void action should be QUEUE.
636  * The next not void action should be END.
637  * pattern example:
638  * ITEM         Spec                    Mask
639  * ETH          type    0x0807          0xFFFF
640  * END
641  * other members in mask and spec should be set to 0x00.
642  * item->last should be NULL.
643  */
644 static int
645 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
646                             const struct rte_flow_item *pattern,
647                             const struct rte_flow_action *actions,
648                             struct rte_eth_ethertype_filter *filter,
649                             struct rte_flow_error *error)
650 {
651         const struct rte_flow_item *item;
652         const struct rte_flow_action *act;
653         const struct rte_flow_item_eth *eth_spec;
654         const struct rte_flow_item_eth *eth_mask;
655         const struct rte_flow_action_queue *act_q;
656
657         if (!pattern) {
658                 rte_flow_error_set(error, EINVAL,
659                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
660                                 NULL, "NULL pattern.");
661                 return -rte_errno;
662         }
663
664         if (!actions) {
665                 rte_flow_error_set(error, EINVAL,
666                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
667                                 NULL, "NULL action.");
668                 return -rte_errno;
669         }
670
671         if (!attr) {
672                 rte_flow_error_set(error, EINVAL,
673                                    RTE_FLOW_ERROR_TYPE_ATTR,
674                                    NULL, "NULL attribute.");
675                 return -rte_errno;
676         }
677
678         item = next_no_void_pattern(pattern, NULL);
679         /* The first non-void item should be MAC. */
680         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
681                 rte_flow_error_set(error, EINVAL,
682                         RTE_FLOW_ERROR_TYPE_ITEM,
683                         item, "Not supported by ethertype filter");
684                 return -rte_errno;
685         }
686
687         /* Not supported last point for range */
688         if (item->last) {
689                 rte_flow_error_set(error, EINVAL,
690                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
691                         item, "Not supported last point for range");
692                 return -rte_errno;
693         }
694
695         /* Get the MAC info. */
696         if (!item->spec || !item->mask) {
697                 rte_flow_error_set(error, EINVAL,
698                                 RTE_FLOW_ERROR_TYPE_ITEM,
699                                 item, "Not supported by ethertype filter");
700                 return -rte_errno;
701         }
702
703         eth_spec = item->spec;
704         eth_mask = item->mask;
705
706         /* Mask bits of source MAC address must be full of 0.
707          * Mask bits of destination MAC address must be full
708          * of 1 or full of 0.
709          */
710         if (!rte_is_zero_ether_addr(&eth_mask->src) ||
711             (!rte_is_zero_ether_addr(&eth_mask->dst) &&
712              !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
713                 rte_flow_error_set(error, EINVAL,
714                                 RTE_FLOW_ERROR_TYPE_ITEM,
715                                 item, "Invalid ether address mask");
716                 return -rte_errno;
717         }
718
719         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
720                 rte_flow_error_set(error, EINVAL,
721                                 RTE_FLOW_ERROR_TYPE_ITEM,
722                                 item, "Invalid ethertype mask");
723                 return -rte_errno;
724         }
725
726         /* If mask bits of destination MAC address
727          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
728          */
729         if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
730                 filter->mac_addr = eth_spec->dst;
731                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
732         } else {
733                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
734         }
735         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
736
737         /* Check if the next non-void item is END. */
738         item = next_no_void_pattern(pattern, item);
739         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
740                 rte_flow_error_set(error, EINVAL,
741                                 RTE_FLOW_ERROR_TYPE_ITEM,
742                                 item, "Not supported by ethertype filter.");
743                 return -rte_errno;
744         }
745
746         /* Parse action */
747
748         act = next_no_void_action(actions, NULL);
749         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
750             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
751                 rte_flow_error_set(error, EINVAL,
752                                 RTE_FLOW_ERROR_TYPE_ACTION,
753                                 act, "Not supported action.");
754                 return -rte_errno;
755         }
756
757         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
758                 act_q = (const struct rte_flow_action_queue *)act->conf;
759                 filter->queue = act_q->index;
760         } else {
761                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
762         }
763
764         /* Check if the next non-void item is END */
765         act = next_no_void_action(actions, act);
766         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
767                 rte_flow_error_set(error, EINVAL,
768                                 RTE_FLOW_ERROR_TYPE_ACTION,
769                                 act, "Not supported action.");
770                 return -rte_errno;
771         }
772
773         /* Parse attr */
774         /* Must be input direction */
775         if (!attr->ingress) {
776                 rte_flow_error_set(error, EINVAL,
777                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
778                                 attr, "Only support ingress.");
779                 return -rte_errno;
780         }
781
782         /* Not supported */
783         if (attr->egress) {
784                 rte_flow_error_set(error, EINVAL,
785                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
786                                 attr, "Not support egress.");
787                 return -rte_errno;
788         }
789
790         /* Not supported */
791         if (attr->transfer) {
792                 rte_flow_error_set(error, EINVAL,
793                                 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
794                                 attr, "No support for transfer.");
795                 return -rte_errno;
796         }
797
798         /* Not supported */
799         if (attr->priority) {
800                 rte_flow_error_set(error, EINVAL,
801                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
802                                 attr, "Not support priority.");
803                 return -rte_errno;
804         }
805
806         /* Not supported */
807         if (attr->group) {
808                 rte_flow_error_set(error, EINVAL,
809                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
810                                 attr, "Not support group.");
811                 return -rte_errno;
812         }
813
814         return 0;
815 }
816
817 static int
818 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
819                              const struct rte_flow_attr *attr,
820                              const struct rte_flow_item pattern[],
821                              const struct rte_flow_action actions[],
822                              struct rte_eth_ethertype_filter *filter,
823                              struct rte_flow_error *error)
824 {
825         int ret;
826
827         ret = cons_parse_ethertype_filter(attr, pattern,
828                                         actions, filter, error);
829
830         if (ret)
831                 return ret;
832
833         if (filter->queue >= dev->data->nb_rx_queues) {
834                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
835                 rte_flow_error_set(error, EINVAL,
836                         RTE_FLOW_ERROR_TYPE_ITEM,
837                         NULL, "queue index much too big");
838                 return -rte_errno;
839         }
840
841         if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
842                 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
843                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
844                 rte_flow_error_set(error, EINVAL,
845                         RTE_FLOW_ERROR_TYPE_ITEM,
846                         NULL, "IPv4/IPv6 not supported by ethertype filter");
847                 return -rte_errno;
848         }
849
850         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
851                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
852                 rte_flow_error_set(error, EINVAL,
853                         RTE_FLOW_ERROR_TYPE_ITEM,
854                         NULL, "mac compare is unsupported");
855                 return -rte_errno;
856         }
857
858         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
859                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
860                 rte_flow_error_set(error, EINVAL,
861                         RTE_FLOW_ERROR_TYPE_ITEM,
862                         NULL, "drop option is unsupported");
863                 return -rte_errno;
864         }
865
866         return 0;
867 }
868
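/*
 * Illustrative sketch (assumption, not part of the driver): an rte_flow
 * rule matching a single ethertype and steering it to a queue, in the
 * layout accepted above. Port id 0, ethertype 0x88F7 and queue index 1 are
 * made up for the example. A DROP action would parse here but is then
 * rejected by txgbe_parse_ethertype_filter().
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x88F7),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */
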
869 /**
870  * Parse the rule to see if it is a TCP SYN rule,
871  * and get the TCP SYN filter info along the way.
872  * pattern:
873  * The first not void item must be ETH.
874  * The second not void item must be IPV4 or IPV6.
875  * The third not void item must be TCP.
876  * The next not void item must be END.
877  * action:
878  * The first not void action should be QUEUE.
879  * The next not void action should be END.
880  * pattern example:
881  * ITEM         Spec                    Mask
882  * ETH          NULL                    NULL
883  * IPV4/IPV6    NULL                    NULL
884  * TCP          tcp_flags       0x02    0xFF
885  * END
886  * other members in mask and spec should be set to 0x00.
887  * item->last should be NULL.
888  */
889 static int
890 cons_parse_syn_filter(const struct rte_flow_attr *attr,
891                                 const struct rte_flow_item pattern[],
892                                 const struct rte_flow_action actions[],
893                                 struct rte_eth_syn_filter *filter,
894                                 struct rte_flow_error *error)
895 {
896         const struct rte_flow_item *item;
897         const struct rte_flow_action *act;
898         const struct rte_flow_item_tcp *tcp_spec;
899         const struct rte_flow_item_tcp *tcp_mask;
900         const struct rte_flow_action_queue *act_q;
901
902         if (!pattern) {
903                 rte_flow_error_set(error, EINVAL,
904                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
905                                 NULL, "NULL pattern.");
906                 return -rte_errno;
907         }
908
909         if (!actions) {
910                 rte_flow_error_set(error, EINVAL,
911                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
912                                 NULL, "NULL action.");
913                 return -rte_errno;
914         }
915
916         if (!attr) {
917                 rte_flow_error_set(error, EINVAL,
918                                    RTE_FLOW_ERROR_TYPE_ATTR,
919                                    NULL, "NULL attribute.");
920                 return -rte_errno;
921         }
922
923
924         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
925         item = next_no_void_pattern(pattern, NULL);
926         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
927             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
928             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
929             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
930                 rte_flow_error_set(error, EINVAL,
931                                 RTE_FLOW_ERROR_TYPE_ITEM,
932                                 item, "Not supported by syn filter");
933                 return -rte_errno;
934         }
935         /* Not supported last point for range */
936         if (item->last) {
937                 rte_flow_error_set(error, EINVAL,
938                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
939                         item, "Not supported last point for range");
940                 return -rte_errno;
941         }
942
943         /* Skip Ethernet */
944         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
945                 /* if the item is MAC, the content should be NULL */
946                 if (item->spec || item->mask) {
947                         rte_flow_error_set(error, EINVAL,
948                                 RTE_FLOW_ERROR_TYPE_ITEM,
949                                 item, "Invalid SYN address mask");
950                         return -rte_errno;
951                 }
952
953                 /* check if the next not void item is IPv4 or IPv6 */
954                 item = next_no_void_pattern(pattern, item);
955                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
956                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
957                         rte_flow_error_set(error, EINVAL,
958                                 RTE_FLOW_ERROR_TYPE_ITEM,
959                                 item, "Not supported by syn filter");
960                         return -rte_errno;
961                 }
962         }
963
964         /* Skip IP */
965         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
966             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
967                 /* if the item is IP, the content should be NULL */
968                 if (item->spec || item->mask) {
969                         rte_flow_error_set(error, EINVAL,
970                                 RTE_FLOW_ERROR_TYPE_ITEM,
971                                 item, "Invalid SYN mask");
972                         return -rte_errno;
973                 }
974
975                 /* check if the next not void item is TCP */
976                 item = next_no_void_pattern(pattern, item);
977                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
978                         rte_flow_error_set(error, EINVAL,
979                                 RTE_FLOW_ERROR_TYPE_ITEM,
980                                 item, "Not supported by syn filter");
981                         return -rte_errno;
982                 }
983         }
984
985         /* Get the TCP info. Only support SYN. */
986         if (!item->spec || !item->mask) {
987                 rte_flow_error_set(error, EINVAL,
988                                 RTE_FLOW_ERROR_TYPE_ITEM,
989                                 item, "Invalid SYN mask");
990                 return -rte_errno;
991         }
992         /* Not supported last point for range */
993         if (item->last) {
994                 rte_flow_error_set(error, EINVAL,
995                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
996                         item, "Not supported last point for range");
997                 return -rte_errno;
998         }
999
1000         tcp_spec = item->spec;
1001         tcp_mask = item->mask;
1002         if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1003             tcp_mask->hdr.src_port ||
1004             tcp_mask->hdr.dst_port ||
1005             tcp_mask->hdr.sent_seq ||
1006             tcp_mask->hdr.recv_ack ||
1007             tcp_mask->hdr.data_off ||
1008             tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1009             tcp_mask->hdr.rx_win ||
1010             tcp_mask->hdr.cksum ||
1011             tcp_mask->hdr.tcp_urp) {
1012                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1013                 rte_flow_error_set(error, EINVAL,
1014                                 RTE_FLOW_ERROR_TYPE_ITEM,
1015                                 item, "Not supported by syn filter");
1016                 return -rte_errno;
1017         }
1018
1019         /* check if the next not void item is END */
1020         item = next_no_void_pattern(pattern, item);
1021         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1022                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1023                 rte_flow_error_set(error, EINVAL,
1024                                 RTE_FLOW_ERROR_TYPE_ITEM,
1025                                 item, "Not supported by syn filter");
1026                 return -rte_errno;
1027         }
1028
1029         /* check if the first not void action is QUEUE. */
1030         act = next_no_void_action(actions, NULL);
1031         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1032                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1033                 rte_flow_error_set(error, EINVAL,
1034                                 RTE_FLOW_ERROR_TYPE_ACTION,
1035                                 act, "Not supported action.");
1036                 return -rte_errno;
1037         }
1038
1039         act_q = (const struct rte_flow_action_queue *)act->conf;
1040         filter->queue = act_q->index;
1041         if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
1042                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1043                 rte_flow_error_set(error, EINVAL,
1044                                 RTE_FLOW_ERROR_TYPE_ACTION,
1045                                 act, "Not supported action.");
1046                 return -rte_errno;
1047         }
1048
1049         /* check if the next not void item is END */
1050         act = next_no_void_action(actions, act);
1051         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1052                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053                 rte_flow_error_set(error, EINVAL,
1054                                 RTE_FLOW_ERROR_TYPE_ACTION,
1055                                 act, "Not supported action.");
1056                 return -rte_errno;
1057         }
1058
1059         /* parse attr */
1060         /* must be input direction */
1061         if (!attr->ingress) {
1062                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063                 rte_flow_error_set(error, EINVAL,
1064                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1065                         attr, "Only support ingress.");
1066                 return -rte_errno;
1067         }
1068
1069         /* not supported */
1070         if (attr->egress) {
1071                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1072                 rte_flow_error_set(error, EINVAL,
1073                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1074                         attr, "Not support egress.");
1075                 return -rte_errno;
1076         }
1077
1078         /* not supported */
1079         if (attr->transfer) {
1080                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1081                 rte_flow_error_set(error, EINVAL,
1082                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1083                         attr, "No support for transfer.");
1084                 return -rte_errno;
1085         }
1086
1087         /* Support 2 priorities, the lowest or highest. */
1088         if (!attr->priority) {
1089                 filter->hig_pri = 0;
1090         } else if (attr->priority == (uint32_t)~0U) {
1091                 filter->hig_pri = 1;
1092         } else {
1093                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1094                 rte_flow_error_set(error, EINVAL,
1095                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1096                         attr, "Not support priority.");
1097                 return -rte_errno;
1098         }
1099
1100         return 0;
1101 }
1102
1103 static int
1104 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1105                              const struct rte_flow_attr *attr,
1106                              const struct rte_flow_item pattern[],
1107                              const struct rte_flow_action actions[],
1108                              struct rte_eth_syn_filter *filter,
1109                              struct rte_flow_error *error)
1110 {
1111         int ret;
1112
1113         ret = cons_parse_syn_filter(attr, pattern,
1114                                         actions, filter, error);
1115
1116         if (ret)
1117                 return ret;
1118
1119         if (filter->queue >= dev->data->nb_rx_queues)
1120                 return rte_flow_error_set(error, EINVAL,
1121                         RTE_FLOW_ERROR_TYPE_ACTION, NULL, "Invalid queue index");
1122         return 0;
1123 }
1124
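/*
 * Illustrative sketch (assumption, not part of the driver): an rte_flow
 * rule matching TCP SYN packets in the layout accepted above, with made-up
 * port id 0 and queue index 2. Only the SYN flag may be specified; all the
 * other TCP fields must stay masked out.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = rte_flow_create(0, &attr, pattern,
 *						actions, &err);
 */
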
1125 /**
1126  * Parse the rule to see if it is an L2 tunnel rule,
1127  * and get the L2 tunnel filter info along the way.
1128  * Only support E-tag now.
1129  * pattern:
1130  * The first not void item can be E_TAG.
1131  * The next not void item must be END.
1132  * action:
1133  * The first not void action should be VF or PF.
1134  * The next not void action should be END.
1135  * pattern example:
1136  * ITEM         Spec                    Mask
1137  * E_TAG        grp             0x1     0x3
1138  *              e_cid_base      0x309   0xFFF
1139  * END
1140  * other members in mask and spec should be set to 0x00.
1141  * item->last should be NULL.
1142  */
1143 static int
1144 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1145                         const struct rte_flow_attr *attr,
1146                         const struct rte_flow_item pattern[],
1147                         const struct rte_flow_action actions[],
1148                         struct txgbe_l2_tunnel_conf *filter,
1149                         struct rte_flow_error *error)
1150 {
1151         const struct rte_flow_item *item;
1152         const struct rte_flow_item_e_tag *e_tag_spec;
1153         const struct rte_flow_item_e_tag *e_tag_mask;
1154         const struct rte_flow_action *act;
1155         const struct rte_flow_action_vf *act_vf;
1156         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1157
1158         if (!pattern) {
1159                 rte_flow_error_set(error, EINVAL,
1160                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1161                         NULL, "NULL pattern.");
1162                 return -rte_errno;
1163         }
1164
1165         if (!actions) {
1166                 rte_flow_error_set(error, EINVAL,
1167                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1168                                    NULL, "NULL action.");
1169                 return -rte_errno;
1170         }
1171
1172         if (!attr) {
1173                 rte_flow_error_set(error, EINVAL,
1174                                    RTE_FLOW_ERROR_TYPE_ATTR,
1175                                    NULL, "NULL attribute.");
1176                 return -rte_errno;
1177         }
1178
1179         /* The first not void item should be e-tag. */
1180         item = next_no_void_pattern(pattern, NULL);
1181         if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1182                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1183                 rte_flow_error_set(error, EINVAL,
1184                         RTE_FLOW_ERROR_TYPE_ITEM,
1185                         item, "Not supported by L2 tunnel filter");
1186                 return -rte_errno;
1187         }
1188
1189         if (!item->spec || !item->mask) {
1190                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1191                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1192                         item, "Not supported by L2 tunnel filter");
1193                 return -rte_errno;
1194         }
1195
1196         /* Not supported last point for range */
1197         if (item->last) {
1198                 rte_flow_error_set(error, EINVAL,
1199                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1200                         item, "Not supported last point for range");
1201                 return -rte_errno;
1202         }
1203
1204         e_tag_spec = item->spec;
1205         e_tag_mask = item->mask;
1206
1207         /* Only care about GRP and E cid base. */
1208         if (e_tag_mask->epcp_edei_in_ecid_b ||
1209             e_tag_mask->in_ecid_e ||
1210             e_tag_mask->ecid_e ||
1211             e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1212                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1213                 rte_flow_error_set(error, EINVAL,
1214                         RTE_FLOW_ERROR_TYPE_ITEM,
1215                         item, "Not supported by L2 tunnel filter");
1216                 return -rte_errno;
1217         }
1218
1219         filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1220         /**
1221          * grp and e_cid_base are bit fields and only use 14 bits.
1222          * e-tag id is taken as little endian by HW.
1223          */
1224         filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1225
1226         /* check if the next not void item is END */
1227         item = next_no_void_pattern(pattern, item);
1228         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1229                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1230                 rte_flow_error_set(error, EINVAL,
1231                         RTE_FLOW_ERROR_TYPE_ITEM,
1232                         item, "Not supported by L2 tunnel filter");
1233                 return -rte_errno;
1234         }
1235
1236         /* parse attr */
1237         /* must be input direction */
1238         if (!attr->ingress) {
1239                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1240                 rte_flow_error_set(error, EINVAL,
1241                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1242                         attr, "Only support ingress.");
1243                 return -rte_errno;
1244         }
1245
1246         /* not supported */
1247         if (attr->egress) {
1248                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1249                 rte_flow_error_set(error, EINVAL,
1250                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1251                         attr, "Not support egress.");
1252                 return -rte_errno;
1253         }
1254
1255         /* not supported */
1256         if (attr->transfer) {
1257                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1258                 rte_flow_error_set(error, EINVAL,
1259                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1260                         attr, "No support for transfer.");
1261                 return -rte_errno;
1262         }
1263
1264         /* not supported */
1265         if (attr->priority) {
1266                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1267                 rte_flow_error_set(error, EINVAL,
1268                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1269                         attr, "Not support priority.");
1270                 return -rte_errno;
1271         }
1272
1273         /* check if the first not void action is VF or PF. */
1274         act = next_no_void_action(actions, NULL);
1275         if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1276                         act->type != RTE_FLOW_ACTION_TYPE_PF) {
1277                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1278                 rte_flow_error_set(error, EINVAL,
1279                         RTE_FLOW_ERROR_TYPE_ACTION,
1280                         act, "Not supported action.");
1281                 return -rte_errno;
1282         }
1283
1284         if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1285                 act_vf = (const struct rte_flow_action_vf *)act->conf;
1286                 filter->pool = act_vf->id;
1287         } else {
1288                 filter->pool = pci_dev->max_vfs;
1289         }
1290
1291         /* check if the next not void item is END */
1292         act = next_no_void_action(actions, act);
1293         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1294                 memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
1295                 rte_flow_error_set(error, EINVAL,
1296                         RTE_FLOW_ERROR_TYPE_ACTION,
1297                         act, "Not supported action.");
1298                 return -rte_errno;
1299         }
1300
1301         return 0;
1302 }
1303
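/*
 * Illustrative sketch (not part of the driver): an action list that the
 * E-tag parser above accepts, redirecting matching traffic to a VF pool.
 * The VF id is an arbitrary example value; using the PF action instead
 * selects pool pci_dev->max_vfs.
 *
 *	struct rte_flow_action_vf vf = { .id = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
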
1304 static int
1305 txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1306                         const struct rte_flow_attr *attr,
1307                         const struct rte_flow_item pattern[],
1308                         const struct rte_flow_action actions[],
1309                         struct txgbe_l2_tunnel_conf *l2_tn_filter,
1310                         struct rte_flow_error *error)
1311 {
1312         int ret = 0;
1313         struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1314         uint16_t vf_num;
1315
1316         ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1317                                 actions, l2_tn_filter, error);
1318
1319         vf_num = pci_dev->max_vfs;
1320
1321         if (l2_tn_filter->pool > vf_num)
1322                 return -rte_errno;
1323
1324         return ret;
1325 }
1326
1327 /* Parse to get the attr and action info of flow director rule. */
1328 static int
1329 txgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1330                           const struct rte_flow_action actions[],
1331                           struct txgbe_fdir_rule *rule,
1332                           struct rte_flow_error *error)
1333 {
1334         const struct rte_flow_action *act;
1335         const struct rte_flow_action_queue *act_q;
1336         const struct rte_flow_action_mark *mark;
1337
1338         /* parse attr */
1339         /* must be input direction */
1340         if (!attr->ingress) {
1341                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1342                 rte_flow_error_set(error, EINVAL,
1343                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1344                         attr, "Only support ingress.");
1345                 return -rte_errno;
1346         }
1347
1348         /* not supported */
1349         if (attr->egress) {
1350                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1351                 rte_flow_error_set(error, EINVAL,
1352                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1353                         attr, "Not support egress.");
1354                 return -rte_errno;
1355         }
1356
1357         /* not supported */
1358         if (attr->transfer) {
1359                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1360                 rte_flow_error_set(error, EINVAL,
1361                         RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1362                         attr, "No support for transfer.");
1363                 return -rte_errno;
1364         }
1365
1366         /* not supported */
1367         if (attr->priority) {
1368                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1369                 rte_flow_error_set(error, EINVAL,
1370                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1371                         attr, "Not support priority.");
1372                 return -rte_errno;
1373         }
1374
1375         /* check if the first not void action is QUEUE or DROP. */
1376         act = next_no_void_action(actions, NULL);
1377         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1378             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1379                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1380                 rte_flow_error_set(error, EINVAL,
1381                         RTE_FLOW_ERROR_TYPE_ACTION,
1382                         act, "Not supported action.");
1383                 return -rte_errno;
1384         }
1385
1386         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1387                 act_q = (const struct rte_flow_action_queue *)act->conf;
1388                 rule->queue = act_q->index;
1389         } else { /* drop */
1390                 /* signature mode does not support drop action. */
1391                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1392                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1393                         rte_flow_error_set(error, EINVAL,
1394                                 RTE_FLOW_ERROR_TYPE_ACTION,
1395                                 act, "Not supported action.");
1396                         return -rte_errno;
1397                 }
1398                 rule->fdirflags = TXGBE_FDIRPICMD_DROP;
1399         }
1400
1401         /* check if the next not void action is MARK or END */
1402         act = next_no_void_action(actions, act);
1403         if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
1404                 act->type != RTE_FLOW_ACTION_TYPE_END) {
1405                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1406                 rte_flow_error_set(error, EINVAL,
1407                         RTE_FLOW_ERROR_TYPE_ACTION,
1408                         act, "Not supported action.");
1409                 return -rte_errno;
1410         }
1411
1412         rule->soft_id = 0;
1413
1414         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1415                 mark = (const struct rte_flow_action_mark *)act->conf;
1416                 rule->soft_id = mark->id;
1417                 act = next_no_void_action(actions, act);
1418         }
1419
1420         /* check if the next not void item is END */
1421         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1422                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1423                 rte_flow_error_set(error, EINVAL,
1424                         RTE_FLOW_ERROR_TYPE_ACTION,
1425                         act, "Not supported action.");
1426                 return -rte_errno;
1427         }
1428
1429         return 0;
1430 }
1431
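/*
 * Illustrative sketch (not part of the driver): an action list accepted by
 * txgbe_parse_fdir_act_attr() above: QUEUE (or DROP), an optional MARK,
 * then END. The queue index and mark id are arbitrary example values.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
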
1432 /* search the next not void pattern item and skip fuzzy items */
1433 static inline
1434 const struct rte_flow_item *next_no_fuzzy_pattern(
1435                 const struct rte_flow_item pattern[],
1436                 const struct rte_flow_item *cur)
1437 {
1438         const struct rte_flow_item *next =
1439                 next_no_void_pattern(pattern, cur);
1440         while (1) {
1441                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1442                         return next;
1443                 next = next_no_void_pattern(pattern, next);
1444         }
1445 }
1446
1447 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1448 {
1449         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1450         const struct rte_flow_item *item;
1451         uint32_t sh, lh, mh;
1452         int i = 0;
1453
1454         while (1) {
1455                 item = pattern + i;
1456                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1457                         break;
1458
1459                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1460                         spec = item->spec;
1461                         last = item->last;
1462                         mask = item->mask;
1463
1464                         if (!spec || !mask)
1465                                 return 0;
1466
1467                         sh = spec->thresh;
1468
1469                         if (!last)
1470                                 lh = sh;
1471                         else
1472                                 lh = last->thresh;
1473
1474                         mh = mask->thresh;
1475                         sh = sh & mh;
1476                         lh = lh & mh;
1477
1478                         if (!sh || sh > lh)
1479                                 return 0;
1480
1481                         return 1;
1482                 }
1483
1484                 i++;
1485         }
1486
1487         return 0;
1488 }
1489
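/*
 * Illustrative sketch (not part of the driver): a FUZZY item that makes
 * signature_match() return 1 and thus selects signature mode. Any nonzero
 * threshold that survives the mask works; the values below are arbitrary.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */
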
1490 /**
1491  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1492  * and get the flow director filter info as well.
1493  * UDP/TCP/SCTP PATTERN:
1494  * The first not void item can be ETH or IPV4 or IPV6
1495  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1496  * The next not void item could be UDP or TCP or SCTP (optional)
1497  * The next not void item could be RAW (for flexbyte, optional)
1498  * The next not void item must be END.
1499  * A Fuzzy Match pattern can appear at any place before END.
1500  * Fuzzy Match is optional for IPV4 but is required for IPV6
1501  * MAC VLAN PATTERN:
1502  * The first not void item must be ETH.
1503  * The second not void item must be MAC VLAN.
1504  * The next not void item must be END.
1505  * ACTION:
1506  * The first not void action should be QUEUE or DROP.
1507  * The second not void action is an optional MARK;
1508  * mark_id is a uint32_t number.
1509  * The next not void action should be END.
1510  * UDP/TCP/SCTP pattern example:
1511  * ITEM         Spec                    Mask
1512  * ETH          NULL                    NULL
1513  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1514  *              dst_addr 192.167.3.50   0xFFFFFFFF
1515  * UDP/TCP/SCTP src_port        80      0xFFFF
1516  *              dst_port        80      0xFFFF
1517  * FLEX relative        0       0x1
1518  *              search          0       0x1
1519  *              reserved        0       0
1520  *              offset          12      0xFFFFFFFF
1521  *              limit           0       0xFFFF
1522  *              length          2       0xFFFF
1523  *              pattern[0]      0x86    0xFF
1524  *              pattern[1]      0xDD    0xFF
1525  * END
1526  * MAC VLAN pattern example:
1527  * ITEM         Spec                    Mask
1528  * ETH          dst_addr
1529  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1530  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1531  * MAC VLAN     tci     0x2016          0xEFFF
1532  * END
1533  * Other members in mask and spec should be set to 0x00.
1534  * Item->last should be NULL.
1535  */
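/*
 * Illustrative sketch (not part of the driver): a perfect-mode IPv4/UDP
 * pattern matching the table above, as an application might declare it.
 * Addresses and ports are arbitrary example values; header fields that are
 * not listed keep a zero mask, as the parser below requires.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = RTE_BE32(0xffffffff),
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = RTE_BE16(0xffff),
 *		.hdr.dst_port = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */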
1536 static int
1537 txgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev __rte_unused,
1538                                const struct rte_flow_attr *attr,
1539                                const struct rte_flow_item pattern[],
1540                                const struct rte_flow_action actions[],
1541                                struct txgbe_fdir_rule *rule,
1542                                struct rte_flow_error *error)
1543 {
1544         const struct rte_flow_item *item;
1545         const struct rte_flow_item_eth *eth_mask;
1546         const struct rte_flow_item_ipv4 *ipv4_spec;
1547         const struct rte_flow_item_ipv4 *ipv4_mask;
1548         const struct rte_flow_item_ipv6 *ipv6_spec;
1549         const struct rte_flow_item_ipv6 *ipv6_mask;
1550         const struct rte_flow_item_tcp *tcp_spec;
1551         const struct rte_flow_item_tcp *tcp_mask;
1552         const struct rte_flow_item_udp *udp_spec;
1553         const struct rte_flow_item_udp *udp_mask;
1554         const struct rte_flow_item_sctp *sctp_spec;
1555         const struct rte_flow_item_sctp *sctp_mask;
1556         const struct rte_flow_item_raw *raw_mask;
1557         const struct rte_flow_item_raw *raw_spec;
1558         u32 ptype = 0;
1559         uint8_t j;
1560
1561         if (!pattern) {
1562                 rte_flow_error_set(error, EINVAL,
1563                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1564                         NULL, "NULL pattern.");
1565                 return -rte_errno;
1566         }
1567
1568         if (!actions) {
1569                 rte_flow_error_set(error, EINVAL,
1570                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1571                                    NULL, "NULL action.");
1572                 return -rte_errno;
1573         }
1574
1575         if (!attr) {
1576                 rte_flow_error_set(error, EINVAL,
1577                                    RTE_FLOW_ERROR_TYPE_ATTR,
1578                                    NULL, "NULL attribute.");
1579                 return -rte_errno;
1580         }
1581
1582         /**
1583          * Some fields may not be provided. Set spec to 0 and mask to the default
1584          * value, so we need not do anything later for the fields not provided.
1585          */
1586         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1587         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
1588         rule->mask.vlan_tci_mask = 0;
1589         rule->mask.flex_bytes_mask = 0;
1590
1591         /**
1592          * The first not void item should be
1593          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1594          */
1595         item = next_no_fuzzy_pattern(pattern, NULL);
1596         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1597             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1598             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1599             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1600             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1601             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1602                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1603                 rte_flow_error_set(error, EINVAL,
1604                         RTE_FLOW_ERROR_TYPE_ITEM,
1605                         item, "Not supported by fdir filter");
1606                 return -rte_errno;
1607         }
1608
1609         if (signature_match(pattern))
1610                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1611         else
1612                 rule->mode = RTE_FDIR_MODE_PERFECT;
1613
1614         /*Not supported last point for range*/
1615         if (item->last) {
1616                 rte_flow_error_set(error, EINVAL,
1617                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1618                         item, "Not supported last point for range");
1619                 return -rte_errno;
1620         }
1621
1622         /* Get the MAC info. */
1623         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1624                 /**
1625                  * Only the VLAN and dst MAC address are supported;
1626                  * others should be masked.
1627                  */
1628                 if (item->spec && !item->mask) {
1629                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1630                         rte_flow_error_set(error, EINVAL,
1631                                 RTE_FLOW_ERROR_TYPE_ITEM,
1632                                 item, "Not supported by fdir filter");
1633                         return -rte_errno;
1634                 }
1635
1636                 if (item->mask) {
1637                         rule->b_mask = TRUE;
1638                         eth_mask = item->mask;
1639
1640                         /* Ether type should be masked. */
1641                         if (eth_mask->type ||
1642                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1643                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1644                                 rte_flow_error_set(error, EINVAL,
1645                                         RTE_FLOW_ERROR_TYPE_ITEM,
1646                                         item, "Not supported by fdir filter");
1647                                 return -rte_errno;
1648                         }
1649
1650                         /* If the Ethernet item has a mask, it means MAC VLAN mode. */
1651                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1652
1653                         /**
1654                          * src MAC address must be fully masked out (mask 0x00),
1655                          * and the dst MAC address must be fully matched (mask 0xFF).
1656                          */
1657                         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1658                                 if (eth_mask->src.addr_bytes[j] ||
1659                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1660                                         memset(rule, 0,
1661                                         sizeof(struct txgbe_fdir_rule));
1662                                         rte_flow_error_set(error, EINVAL,
1663                                         RTE_FLOW_ERROR_TYPE_ITEM,
1664                                         item, "Not supported by fdir filter");
1665                                         return -rte_errno;
1666                                 }
1667                         }
1668
1669                         /* When there is no VLAN, it is considered a full mask. */
1670                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1671                 }
1672                 /** If both spec and mask are NULL,
1673                  * it means don't care about ETH.
1674                  * Do nothing.
1675                  */
1676
1677                 /**
1678                  * Check if the next not void item is vlan or ipv4.
1679                  * IPv6 is not supported.
1680                  */
1681                 item = next_no_fuzzy_pattern(pattern, item);
1682                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1683                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1684                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1685                                 rte_flow_error_set(error, EINVAL,
1686                                         RTE_FLOW_ERROR_TYPE_ITEM,
1687                                         item, "Not supported by fdir filter");
1688                                 return -rte_errno;
1689                         }
1690                 } else {
1691                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1692                                         item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1693                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1694                                 rte_flow_error_set(error, EINVAL,
1695                                         RTE_FLOW_ERROR_TYPE_ITEM,
1696                                         item, "Not supported by fdir filter");
1697                                 return -rte_errno;
1698                         }
1699                 }
1700         }
1701
1702         /* Get the IPV4 info. */
1703         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1704                 /**
1705                  * Set the flow type even if there's no content
1706                  * as we must have a flow type.
1707                  */
1708                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV4;
1709                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4];
1710                 /*Not supported last point for range*/
1711                 if (item->last) {
1712                         rte_flow_error_set(error, EINVAL,
1713                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1714                                 item, "Not supported last point for range");
1715                         return -rte_errno;
1716                 }
1717                 /**
1718                  * Only care about src & dst addresses,
1719                  * others should be masked.
1720                  */
1721                 if (!item->mask) {
1722                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1723                         rte_flow_error_set(error, EINVAL,
1724                                 RTE_FLOW_ERROR_TYPE_ITEM,
1725                                 item, "Not supported by fdir filter");
1726                         return -rte_errno;
1727                 }
1728                 rule->b_mask = TRUE;
1729                 ipv4_mask = item->mask;
1730                 if (ipv4_mask->hdr.version_ihl ||
1731                     ipv4_mask->hdr.type_of_service ||
1732                     ipv4_mask->hdr.total_length ||
1733                     ipv4_mask->hdr.packet_id ||
1734                     ipv4_mask->hdr.fragment_offset ||
1735                     ipv4_mask->hdr.time_to_live ||
1736                     ipv4_mask->hdr.next_proto_id ||
1737                     ipv4_mask->hdr.hdr_checksum) {
1738                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1739                         rte_flow_error_set(error, EINVAL,
1740                                 RTE_FLOW_ERROR_TYPE_ITEM,
1741                                 item, "Not supported by fdir filter");
1742                         return -rte_errno;
1743                 }
1744                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1745                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1746
1747                 if (item->spec) {
1748                         rule->b_spec = TRUE;
1749                         ipv4_spec = item->spec;
1750                         rule->input.dst_ip[0] =
1751                                 ipv4_spec->hdr.dst_addr;
1752                         rule->input.src_ip[0] =
1753                                 ipv4_spec->hdr.src_addr;
1754                 }
1755
1756                 /**
1757                  * Check if the next not void item is
1758                  * TCP or UDP or SCTP or END.
1759                  */
1760                 item = next_no_fuzzy_pattern(pattern, item);
1761                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1762                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1763                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1764                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1765                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1766                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1767                         rte_flow_error_set(error, EINVAL,
1768                                 RTE_FLOW_ERROR_TYPE_ITEM,
1769                                 item, "Not supported by fdir filter");
1770                         return -rte_errno;
1771                 }
1772         }
1773
1774         /* Get the IPV6 info. */
1775         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1776                 /**
1777                  * Set the flow type even if there's no content
1778                  * as we must have a flow type.
1779                  */
1780                 rule->input.flow_type = TXGBE_ATR_FLOW_TYPE_IPV6;
1781                 ptype = txgbe_ptype_table[TXGBE_PT_IPV6];
1782
1783                 /**
1784                  * 1. must be a signature match
1785                  * 2. 'last' is not supported
1786                  * 3. mask must not be NULL
1787                  */
1788                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1789                     item->last ||
1790                     !item->mask) {
1791                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1792                         rte_flow_error_set(error, EINVAL,
1793                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1794                                 item, "Not supported last point for range");
1795                         return -rte_errno;
1796                 }
1797
1798                 rule->b_mask = TRUE;
1799                 ipv6_mask = item->mask;
1800                 if (ipv6_mask->hdr.vtc_flow ||
1801                     ipv6_mask->hdr.payload_len ||
1802                     ipv6_mask->hdr.proto ||
1803                     ipv6_mask->hdr.hop_limits) {
1804                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1805                         rte_flow_error_set(error, EINVAL,
1806                                 RTE_FLOW_ERROR_TYPE_ITEM,
1807                                 item, "Not supported by fdir filter");
1808                         return -rte_errno;
1809                 }
1810
1811                 /* check src addr mask */
1812                 for (j = 0; j < 16; j++) {
1813                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1814                                 rule->mask.src_ipv6_mask |= 1 << j;
1815                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1816                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1817                                 rte_flow_error_set(error, EINVAL,
1818                                         RTE_FLOW_ERROR_TYPE_ITEM,
1819                                         item, "Not supported by fdir filter");
1820                                 return -rte_errno;
1821                         }
1822                 }
1823
1824                 /* check dst addr mask */
1825                 for (j = 0; j < 16; j++) {
1826                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1827                                 rule->mask.dst_ipv6_mask |= 1 << j;
1828                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1829                                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1830                                 rte_flow_error_set(error, EINVAL,
1831                                         RTE_FLOW_ERROR_TYPE_ITEM,
1832                                         item, "Not supported by fdir filter");
1833                                 return -rte_errno;
1834                         }
1835                 }
1836
1837                 if (item->spec) {
1838                         rule->b_spec = TRUE;
1839                         ipv6_spec = item->spec;
1840                         rte_memcpy(rule->input.src_ip,
1841                                    ipv6_spec->hdr.src_addr, 16);
1842                         rte_memcpy(rule->input.dst_ip,
1843                                    ipv6_spec->hdr.dst_addr, 16);
1844                 }
1845
1846                 /**
1847                  * Check if the next not void item is
1848                  * TCP or UDP or SCTP or END.
1849                  */
1850                 item = next_no_fuzzy_pattern(pattern, item);
1851                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1852                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1853                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1854                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1855                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1856                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1857                         rte_flow_error_set(error, EINVAL,
1858                                 RTE_FLOW_ERROR_TYPE_ITEM,
1859                                 item, "Not supported by fdir filter");
1860                         return -rte_errno;
1861                 }
1862         }
1863
1864         /* Get the TCP info. */
1865         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1866                 /**
1867                  * Set the flow type even if there's no content
1868                  * as we must have a flow type.
1869                  */
1870                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_TCP;
1871                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_TCP];
1872                 /*Not supported last point for range*/
1873                 if (item->last) {
1874                         rte_flow_error_set(error, EINVAL,
1875                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1876                                 item, "Not supported last point for range");
1877                         return -rte_errno;
1878                 }
1879                 /**
1880                  * Only care about src & dst ports,
1881                  * others should be masked.
1882                  */
1883                 if (!item->mask) {
1884                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1885                         rte_flow_error_set(error, EINVAL,
1886                                 RTE_FLOW_ERROR_TYPE_ITEM,
1887                                 item, "Not supported by fdir filter");
1888                         return -rte_errno;
1889                 }
1890                 rule->b_mask = TRUE;
1891                 tcp_mask = item->mask;
1892                 if (tcp_mask->hdr.sent_seq ||
1893                     tcp_mask->hdr.recv_ack ||
1894                     tcp_mask->hdr.data_off ||
1895                     tcp_mask->hdr.tcp_flags ||
1896                     tcp_mask->hdr.rx_win ||
1897                     tcp_mask->hdr.cksum ||
1898                     tcp_mask->hdr.tcp_urp) {
1899                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1900                         rte_flow_error_set(error, EINVAL,
1901                                 RTE_FLOW_ERROR_TYPE_ITEM,
1902                                 item, "Not supported by fdir filter");
1903                         return -rte_errno;
1904                 }
1905                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1906                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1907
1908                 if (item->spec) {
1909                         rule->b_spec = TRUE;
1910                         tcp_spec = item->spec;
1911                         rule->input.src_port =
1912                                 tcp_spec->hdr.src_port;
1913                         rule->input.dst_port =
1914                                 tcp_spec->hdr.dst_port;
1915                 }
1916
1917                 item = next_no_fuzzy_pattern(pattern, item);
1918                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1919                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1920                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1921                         rte_flow_error_set(error, EINVAL,
1922                                 RTE_FLOW_ERROR_TYPE_ITEM,
1923                                 item, "Not supported by fdir filter");
1924                         return -rte_errno;
1925                 }
1926         }
1927
1928         /* Get the UDP info */
1929         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1930                 /**
1931                  * Set the flow type even if there's no content
1932                  * as we must have a flow type.
1933                  */
1934                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_UDP;
1935                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_UDP];
1936                 /*Not supported last point for range*/
1937                 if (item->last) {
1938                         rte_flow_error_set(error, EINVAL,
1939                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1940                                 item, "Not supported last point for range");
1941                         return -rte_errno;
1942                 }
1943                 /**
1944                  * Only care about src & dst ports,
1945                  * others should be masked.
1946                  */
1947                 if (!item->mask) {
1948                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1949                         rte_flow_error_set(error, EINVAL,
1950                                 RTE_FLOW_ERROR_TYPE_ITEM,
1951                                 item, "Not supported by fdir filter");
1952                         return -rte_errno;
1953                 }
1954                 rule->b_mask = TRUE;
1955                 udp_mask = item->mask;
1956                 if (udp_mask->hdr.dgram_len ||
1957                     udp_mask->hdr.dgram_cksum) {
1958                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1959                         rte_flow_error_set(error, EINVAL,
1960                                 RTE_FLOW_ERROR_TYPE_ITEM,
1961                                 item, "Not supported by fdir filter");
1962                         return -rte_errno;
1963                 }
1964                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1965                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1966
1967                 if (item->spec) {
1968                         rule->b_spec = TRUE;
1969                         udp_spec = item->spec;
1970                         rule->input.src_port =
1971                                 udp_spec->hdr.src_port;
1972                         rule->input.dst_port =
1973                                 udp_spec->hdr.dst_port;
1974                 }
1975
1976                 item = next_no_fuzzy_pattern(pattern, item);
1977                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1978                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1979                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
1980                         rte_flow_error_set(error, EINVAL,
1981                                 RTE_FLOW_ERROR_TYPE_ITEM,
1982                                 item, "Not supported by fdir filter");
1983                         return -rte_errno;
1984                 }
1985         }
1986
1987         /* Get the SCTP info */
1988         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1989                 /**
1990                  * Set the flow type even if there's no content
1991                  * as we must have a flow type.
1992                  */
1993                 rule->input.flow_type |= TXGBE_ATR_L4TYPE_SCTP;
1994                 ptype = txgbe_ptype_table[TXGBE_PT_IPV4_SCTP];
1995                 /*Not supported last point for range*/
1996                 if (item->last) {
1997                         rte_flow_error_set(error, EINVAL,
1998                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1999                                 item, "Not supported last point for range");
2000                         return -rte_errno;
2001                 }
2002
2003                 /**
2004                  * Only care about src & dst ports,
2005                  * others should be masked.
2006                  */
2007                 if (!item->mask) {
2008                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2009                         rte_flow_error_set(error, EINVAL,
2010                                 RTE_FLOW_ERROR_TYPE_ITEM,
2011                                 item, "Not supported by fdir filter");
2012                         return -rte_errno;
2013                 }
2014                 rule->b_mask = TRUE;
2015                 sctp_mask = item->mask;
2016                 if (sctp_mask->hdr.tag ||
2017                         sctp_mask->hdr.cksum) {
2018                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2019                         rte_flow_error_set(error, EINVAL,
2020                                 RTE_FLOW_ERROR_TYPE_ITEM,
2021                                 item, "Not supported by fdir filter");
2022                         return -rte_errno;
2023                 }
2024                 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2025                 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2026
2027                 if (item->spec) {
2028                         rule->b_spec = TRUE;
2029                         sctp_spec = item->spec;
2030                         rule->input.src_port =
2031                                 sctp_spec->hdr.src_port;
2032                         rule->input.dst_port =
2033                                 sctp_spec->hdr.dst_port;
2034                 }
2035                 /* other fields, even the SCTP ports, are not supported */
2036                 sctp_mask = item->mask;
2037                 if (sctp_mask &&
2038                         (sctp_mask->hdr.src_port ||
2039                          sctp_mask->hdr.dst_port ||
2040                          sctp_mask->hdr.tag ||
2041                          sctp_mask->hdr.cksum)) {
2042                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2043                         rte_flow_error_set(error, EINVAL,
2044                                 RTE_FLOW_ERROR_TYPE_ITEM,
2045                                 item, "Not supported by fdir filter");
2046                         return -rte_errno;
2047                 }
2048
2049                 item = next_no_fuzzy_pattern(pattern, item);
2050                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2051                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2052                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2053                         rte_flow_error_set(error, EINVAL,
2054                                 RTE_FLOW_ERROR_TYPE_ITEM,
2055                                 item, "Not supported by fdir filter");
2056                         return -rte_errno;
2057                 }
2058         }
2059
2060         /* Get the flex byte info */
2061         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2062                 /* Not supported last point for range*/
2063                 if (item->last) {
2064                         rte_flow_error_set(error, EINVAL,
2065                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2066                                 item, "Not supported last point for range");
2067                         return -rte_errno;
2068                 }
2069                 /* mask should not be null */
2070                 if (!item->mask || !item->spec) {
2071                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2072                         rte_flow_error_set(error, EINVAL,
2073                                 RTE_FLOW_ERROR_TYPE_ITEM,
2074                                 item, "Not supported by fdir filter");
2075                         return -rte_errno;
2076                 }
2077
2078                 raw_mask = item->mask;
2079
2080                 /* check mask */
2081                 if (raw_mask->relative != 0x1 ||
2082                     raw_mask->search != 0x1 ||
2083                     raw_mask->reserved != 0x0 ||
2084                     (uint32_t)raw_mask->offset != 0xffffffff ||
2085                     raw_mask->limit != 0xffff ||
2086                     raw_mask->length != 0xffff) {
2087                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2088                         rte_flow_error_set(error, EINVAL,
2089                                 RTE_FLOW_ERROR_TYPE_ITEM,
2090                                 item, "Not supported by fdir filter");
2091                         return -rte_errno;
2092                 }
2093
2094                 raw_spec = item->spec;
2095
2096                 /* check spec */
2097                 if (raw_spec->relative != 0 ||
2098                     raw_spec->search != 0 ||
2099                     raw_spec->reserved != 0 ||
2100                     raw_spec->offset > TXGBE_MAX_FLX_SOURCE_OFF ||
2101                     raw_spec->offset % 2 ||
2102                     raw_spec->limit != 0 ||
2103                     raw_spec->length != 2 ||
2104                     /* pattern can't be 0xffff */
2105                     (raw_spec->pattern[0] == 0xff &&
2106                      raw_spec->pattern[1] == 0xff)) {
2107                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2108                         rte_flow_error_set(error, EINVAL,
2109                                 RTE_FLOW_ERROR_TYPE_ITEM,
2110                                 item, "Not supported by fdir filter");
2111                         return -rte_errno;
2112                 }
2113
2114                 /* check pattern mask */
2115                 if (raw_mask->pattern[0] != 0xff ||
2116                     raw_mask->pattern[1] != 0xff) {
2117                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2118                         rte_flow_error_set(error, EINVAL,
2119                                 RTE_FLOW_ERROR_TYPE_ITEM,
2120                                 item, "Not supported by fdir filter");
2121                         return -rte_errno;
2122                 }
2123
2124                 rule->mask.flex_bytes_mask = 0xffff;
2125                 rule->input.flex_bytes =
2126                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2127                         raw_spec->pattern[0];
2128                 rule->flex_bytes_offset = raw_spec->offset;
2129         }
2130
2131         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2132                 /* check if the next not void item is END */
2133                 item = next_no_fuzzy_pattern(pattern, item);
2134                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2135                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2136                         rte_flow_error_set(error, EINVAL,
2137                                 RTE_FLOW_ERROR_TYPE_ITEM,
2138                                 item, "Not supported by fdir filter");
2139                         return -rte_errno;
2140                 }
2141         }
2142
2143         rule->input.pkt_type = cpu_to_be16(txgbe_encode_ptype(ptype));
2144
2145         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2146 }
2147
2148 /**
2149  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2150  * and get the flow director filter info as well.
2151  * VxLAN PATTERN:
2152  * The first not void item must be ETH.
2153  * The second not void item must be IPV4/ IPV6.
2154  * The third not void item must be UDP, followed by VXLAN.
2155  * The next not void item must be END.
2156  * NVGRE PATTERN:
2157  * The first not void item must be ETH.
2158  * The second not void item must be IPV4/ IPV6.
2159  * The third not void item must be NVGRE.
2160  * The next not void item must be END.
2161  * ACTION:
2162  * The first not void action should be QUEUE or DROP.
2163  * The second not void action is an optional MARK;
2164  * mark_id is a uint32_t number.
2165  * The next not void action should be END.
2166  * VxLAN pattern example:
2167  * ITEM         Spec                    Mask
2168  * ETH          NULL                    NULL
2169  * IPV4/IPV6    NULL                    NULL
2170  * UDP          NULL                    NULL
2171  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2172  * MAC VLAN     tci     0x2016          0xEFFF
2173  * END
2174  * NVGRE pattern example:
2175  * ITEM         Spec                    Mask
2176  * ETH          NULL                    NULL
2177  * IPV4/IPV6    NULL                    NULL
2178  * NVGRE        protocol        0x6558  0xFFFF
2179  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2180  * MAC VLAN     tci     0x2016          0xEFFF
2181  * END
2182  * Other members in mask and spec should be set to 0x00.
2183  * item->last should be NULL.
2184  */
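/*
 * Illustrative sketch (not part of the driver): a VxLAN pattern matching
 * the table above, as an application might declare it. The VNI, inner MAC
 * and TCI values are arbitrary examples; the outer ETH/IPV4/UDP items only
 * describe the protocol stack and therefore carry no spec or mask.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xff, 0xff, 0xff } };
 *	struct rte_flow_item_eth inner_spec = {
 *		.dst.addr_bytes = { 0xac, 0x7b, 0xa1, 0x2c, 0x6d, 0x36 },
 *	};
 *	struct rte_flow_item_eth inner_mask = {
 *		.dst.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xefff) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_spec, .mask = &inner_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */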
2185 static int
2186 txgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2187                                const struct rte_flow_item pattern[],
2188                                const struct rte_flow_action actions[],
2189                                struct txgbe_fdir_rule *rule,
2190                                struct rte_flow_error *error)
2191 {
2192         const struct rte_flow_item *item;
2193         const struct rte_flow_item_eth *eth_mask;
2194         uint32_t j;
2195
2196         if (!pattern) {
2197                 rte_flow_error_set(error, EINVAL,
2198                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2199                                    NULL, "NULL pattern.");
2200                 return -rte_errno;
2201         }
2202
2203         if (!actions) {
2204                 rte_flow_error_set(error, EINVAL,
2205                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2206                                    NULL, "NULL action.");
2207                 return -rte_errno;
2208         }
2209
2210         if (!attr) {
2211                 rte_flow_error_set(error, EINVAL,
2212                                    RTE_FLOW_ERROR_TYPE_ATTR,
2213                                    NULL, "NULL attribute.");
2214                 return -rte_errno;
2215         }
2216
2217         /**
2218          * Some fields may not be provided. Set spec to 0 and mask to the default
2219          * value, so we need not do anything later for the fields not provided.
2220          */
2221         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2222         memset(&rule->mask, 0xFF, sizeof(struct txgbe_hw_fdir_mask));
2223         rule->mask.vlan_tci_mask = 0;
2224
2225         /**
2226          * The first not void item should be
2227          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2228          */
2229         item = next_no_void_pattern(pattern, NULL);
2230         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2231             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2232             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2233             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2234             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2235             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2236                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2237                 rte_flow_error_set(error, EINVAL,
2238                         RTE_FLOW_ERROR_TYPE_ITEM,
2239                         item, "Not supported by fdir filter");
2240                 return -rte_errno;
2241         }
2242
2243         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2244
2245         /* Skip MAC. */
2246         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2247                 /* Only used to describe the protocol stack. */
2248                 if (item->spec || item->mask) {
2249                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2250                         rte_flow_error_set(error, EINVAL,
2251                                 RTE_FLOW_ERROR_TYPE_ITEM,
2252                                 item, "Not supported by fdir filter");
2253                         return -rte_errno;
2254                 }
2255                 /* Not supported last point for range*/
2256                 if (item->last) {
2257                         rte_flow_error_set(error, EINVAL,
2258                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2259                                 item, "Not supported last point for range");
2260                         return -rte_errno;
2261                 }
2262
2263                 /* Check if the next not void item is IPv4 or IPv6. */
2264                 item = next_no_void_pattern(pattern, item);
2265                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2266                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2267                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2268                         rte_flow_error_set(error, EINVAL,
2269                                 RTE_FLOW_ERROR_TYPE_ITEM,
2270                                 item, "Not supported by fdir filter");
2271                         return -rte_errno;
2272                 }
2273         }
2274
2275         /* Skip IP. */
2276         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2277             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2278                 /* Only used to describe the protocol stack. */
2279                 if (item->spec || item->mask) {
2280                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2281                         rte_flow_error_set(error, EINVAL,
2282                                 RTE_FLOW_ERROR_TYPE_ITEM,
2283                                 item, "Not supported by fdir filter");
2284                         return -rte_errno;
2285                 }
2286                 /*Not supported last point for range*/
2287                 if (item->last) {
2288                         rte_flow_error_set(error, EINVAL,
2289                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2290                                 item, "Not supported last point for range");
2291                         return -rte_errno;
2292                 }
2293
2294                 /* Check if the next not void item is UDP or NVGRE. */
2295                 item = next_no_void_pattern(pattern, item);
2296                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2297                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2298                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2299                         rte_flow_error_set(error, EINVAL,
2300                                 RTE_FLOW_ERROR_TYPE_ITEM,
2301                                 item, "Not supported by fdir filter");
2302                         return -rte_errno;
2303                 }
2304         }
2305
2306         /* Skip UDP. */
2307         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2308                 /* Only used to describe the protocol stack. */
2309                 if (item->spec || item->mask) {
2310                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2311                         rte_flow_error_set(error, EINVAL,
2312                                 RTE_FLOW_ERROR_TYPE_ITEM,
2313                                 item, "Not supported by fdir filter");
2314                         return -rte_errno;
2315                 }
2316                 /*Not supported last point for range*/
2317                 if (item->last) {
2318                         rte_flow_error_set(error, EINVAL,
2319                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2320                                 item, "Not supported last point for range");
2321                         return -rte_errno;
2322                 }
2323
2324                 /* Check if the next not void item is VxLAN. */
2325                 item = next_no_void_pattern(pattern, item);
2326                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2327                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2328                         rte_flow_error_set(error, EINVAL,
2329                                 RTE_FLOW_ERROR_TYPE_ITEM,
2330                                 item, "Not supported by fdir filter");
2331                         return -rte_errno;
2332                 }
2333         }
2334
2335         /* check if the next not void item is MAC */
2336         item = next_no_void_pattern(pattern, item);
2337         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2338                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2339                 rte_flow_error_set(error, EINVAL,
2340                         RTE_FLOW_ERROR_TYPE_ITEM,
2341                         item, "Not supported by fdir filter");
2342                 return -rte_errno;
2343         }
2344
2345         /**
2346          * Only the VLAN and dst MAC address are supported;
2347          * others should be masked.
2348          */
2349
2350         if (!item->mask) {
2351                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2352                 rte_flow_error_set(error, EINVAL,
2353                         RTE_FLOW_ERROR_TYPE_ITEM,
2354                         item, "Not supported by fdir filter");
2355                 return -rte_errno;
2356         }
2357         /*Not supported last point for range*/
2358         if (item->last) {
2359                 rte_flow_error_set(error, EINVAL,
2360                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2361                         item, "Not supported last point for range");
2362                 return -rte_errno;
2363         }
2364         rule->b_mask = TRUE;
2365         eth_mask = item->mask;
2366
2367         /* Ether type should be masked. */
2368         if (eth_mask->type) {
2369                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2370                 rte_flow_error_set(error, EINVAL,
2371                         RTE_FLOW_ERROR_TYPE_ITEM,
2372                         item, "Not supported by fdir filter");
2373                 return -rte_errno;
2374         }
2375
2376         /* src MAC address should be masked. */
2377         for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2378                 if (eth_mask->src.addr_bytes[j]) {
2379                         memset(rule, 0,
2380                                sizeof(struct txgbe_fdir_rule));
2381                         rte_flow_error_set(error, EINVAL,
2382                                 RTE_FLOW_ERROR_TYPE_ITEM,
2383                                 item, "Not supported by fdir filter");
2384                         return -rte_errno;
2385                 }
2386         }
2387         rule->mask.mac_addr_byte_mask = 0;
2388         for (j = 0; j < ETH_ADDR_LEN; j++) {
2389                 /* It's a per byte mask. */
2390                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2391                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2392                 } else if (eth_mask->dst.addr_bytes[j]) {
2393                         memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2394                         rte_flow_error_set(error, EINVAL,
2395                                 RTE_FLOW_ERROR_TYPE_ITEM,
2396                                 item, "Not supported by fdir filter");
2397                         return -rte_errno;
2398                 }
2399         }
2400
2401         /* When there is no VLAN, it is considered a full mask. */
2402         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2403
2404         /**
2405          * Check if the next not void item is vlan or ipv4.
2406          * IPv6 is not supported.
2407          */
2408         item = next_no_void_pattern(pattern, item);
2409         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
2410                 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
2411                 memset(rule, 0, sizeof(struct txgbe_fdir_rule));
2412                 rte_flow_error_set(error, EINVAL,
2413                         RTE_FLOW_ERROR_TYPE_ITEM,
2414                         item, "Not supported by fdir filter");
2415                 return -rte_errno;
2416         }
2417         /*Not supported last point for range*/
2418         if (item->last) {
2419                 rte_flow_error_set(error, EINVAL,
2420                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2421                         item, "Not supported last point for range");
2422                 return -rte_errno;
2423         }
2424
2425         /**
2426          * If the tag is 0, it means we don't care about the VLAN.
2427          * Do nothing.
2428          */
2429
2430         return txgbe_parse_fdir_act_attr(attr, actions, rule, error);
2431 }
2432
2433 static int
2434 txgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2435                         const struct rte_flow_attr *attr,
2436                         const struct rte_flow_item pattern[],
2437                         const struct rte_flow_action actions[],
2438                         struct txgbe_fdir_rule *rule,
2439                         struct rte_flow_error *error)
2440 {
2441         int ret;
2442         struct txgbe_hw *hw = TXGBE_DEV_HW(dev);
2443         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2444
2445         ret = txgbe_parse_fdir_filter_normal(dev, attr, pattern,
2446                                         actions, rule, error);
2447         if (!ret)
2448                 goto step_next;
2449
2450         ret = txgbe_parse_fdir_filter_tunnel(attr, pattern,
2451                                         actions, rule, error);
2452         if (ret)
2453                 return ret;
2454
2455 step_next:
2456
2457         if (hw->mac.type == txgbe_mac_raptor &&
2458                 rule->fdirflags == TXGBE_FDIRPICMD_DROP &&
2459                 (rule->input.src_port != 0 || rule->input.dst_port != 0))
2460                 return -ENOTSUP;
2461
2462         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2463             fdir_mode != rule->mode)
2464                 return -ENOTSUP;
2465
2466         if (rule->queue >= dev->data->nb_rx_queues)
2467                 return -ENOTSUP;
2468
2469         return ret;
2470 }
2471
2472 static int
2473 txgbe_parse_rss_filter(struct rte_eth_dev *dev,
2474                         const struct rte_flow_attr *attr,
2475                         const struct rte_flow_action actions[],
2476                         struct txgbe_rte_flow_rss_conf *rss_conf,
2477                         struct rte_flow_error *error)
2478 {
2479         const struct rte_flow_action *act;
2480         const struct rte_flow_action_rss *rss;
2481         uint16_t n;
2482
2483         /**
2484          * RSS only supports forwarding;
2485          * check if the first not void action is RSS.
2486          */
2487         act = next_no_void_action(actions, NULL);
2488         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2489                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2490                 rte_flow_error_set(error, EINVAL,
2491                         RTE_FLOW_ERROR_TYPE_ACTION,
2492                         act, "Not supported action.");
2493                 return -rte_errno;
2494         }
2495
2496         rss = (const struct rte_flow_action_rss *)act->conf;
2497
2498         if (!rss || !rss->queue_num) {
2499                 rte_flow_error_set(error, EINVAL,
2500                                 RTE_FLOW_ERROR_TYPE_ACTION,
2501                                 act,
2502                            "no valid queues");
2503                 return -rte_errno;
2504         }
2505
2506         for (n = 0; n < rss->queue_num; n++) {
2507                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2508                         rte_flow_error_set(error, EINVAL,
2509                                    RTE_FLOW_ERROR_TYPE_ACTION,
2510                                    act,
2511                                    "queue id > max number of queues");
2512                         return -rte_errno;
2513                 }
2514         }
2515
2516         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2517                 return rte_flow_error_set
2518                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2519                          "non-default RSS hash functions are not supported");
2520         if (rss->level)
2521                 return rte_flow_error_set
2522                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2523                          "a nonzero RSS encapsulation level is not supported");
2524         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2525                 return rte_flow_error_set
2526                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2527                          "RSS hash key must be exactly 40 bytes");
2528         if (rss->queue_num > RTE_DIM(rss_conf->queue))
2529                 return rte_flow_error_set
2530                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2531                          "too many queues for RSS context");
2532         if (txgbe_rss_conf_init(rss_conf, rss))
2533                 return rte_flow_error_set
2534                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2535                          "RSS context initialization failure");
2536
2537         /* check that the next non-void action is END */
2538         act = next_no_void_action(actions, act);
2539         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2540                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2541                 rte_flow_error_set(error, EINVAL,
2542                         RTE_FLOW_ERROR_TYPE_ACTION,
2543                         act, "Not supported action.");
2544                 return -rte_errno;
2545         }
2546
2547         /* parse attr */
2548         /* must be input direction */
2549         if (!attr->ingress) {
2550                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2551                 rte_flow_error_set(error, EINVAL,
2552                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2553                                    attr, "Only support ingress.");
2554                 return -rte_errno;
2555         }
2556
2557         /* not supported */
2558         if (attr->egress) {
2559                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2560                 rte_flow_error_set(error, EINVAL,
2561                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2562                                    attr, "Not support egress.");
2563                 return -rte_errno;
2564         }
2565
2566         /* not supported */
2567         if (attr->transfer) {
2568                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2569                 rte_flow_error_set(error, EINVAL,
2570                                    RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2571                                    attr, "No support for transfer.");
2572                 return -rte_errno;
2573         }
2574
2575         if (attr->priority > 0xFFFF) {
2576                 memset(rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2577                 rte_flow_error_set(error, EINVAL,
2578                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2579                                    attr, "Error priority.");
2580                 return -rte_errno;
2581         }
2582
2583         return 0;
2584 }
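
/*
 * Illustrative usage, not part of the driver: a minimal sketch of an RSS
 * action that txgbe_parse_rss_filter() above accepts, i.e. the default hash
 * function, encapsulation level 0, no more queues than fit in the stored
 * context, and either no key or a 40-byte key. The queue list and hash
 * types are example values.
 *
 *     uint16_t queues[] = { 0, 1, 2, 3 };
 *     struct rte_flow_action_rss rss = {
 *             .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *             .level = 0,
 *             .types = ETH_RSS_IP | ETH_RSS_UDP,
 *             .key_len = 0,                /* no key supplied in this example */
 *             .queue_num = RTE_DIM(queues),
 *             .queue = queues,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 */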
2585
2586 /* remove the rss filter */
2587 static void
2588 txgbe_clear_rss_filter(struct rte_eth_dev *dev)
2589 {
2590         struct txgbe_filter_info *filter_info = TXGBE_DEV_FILTER(dev);
2591
2592         if (filter_info->rss_info.conf.queue_num)
2593                 txgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2594 }
2595
2596 void
2597 txgbe_filterlist_init(void)
2598 {
2599         TAILQ_INIT(&filter_ntuple_list);
2600         TAILQ_INIT(&filter_ethertype_list);
2601         TAILQ_INIT(&filter_syn_list);
2602         TAILQ_INIT(&filter_fdir_list);
2603         TAILQ_INIT(&filter_l2_tunnel_list);
2604         TAILQ_INIT(&filter_rss_list);
2605         TAILQ_INIT(&txgbe_flow_list);
2606 }
2607
2608 void
2609 txgbe_filterlist_flush(void)
2610 {
2611         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2612         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2613         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2614         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2615         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2616         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2617         struct txgbe_rss_conf_ele *rss_filter_ptr;
2618
2619         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2620                 TAILQ_REMOVE(&filter_ntuple_list,
2621                                  ntuple_filter_ptr,
2622                                  entries);
2623                 rte_free(ntuple_filter_ptr);
2624         }
2625
2626         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2627                 TAILQ_REMOVE(&filter_ethertype_list,
2628                                  ethertype_filter_ptr,
2629                                  entries);
2630                 rte_free(ethertype_filter_ptr);
2631         }
2632
2633         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2634                 TAILQ_REMOVE(&filter_syn_list,
2635                                  syn_filter_ptr,
2636                                  entries);
2637                 rte_free(syn_filter_ptr);
2638         }
2639
2640         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2641                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2642                                  l2_tn_filter_ptr,
2643                                  entries);
2644                 rte_free(l2_tn_filter_ptr);
2645         }
2646
2647         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2648                 TAILQ_REMOVE(&filter_fdir_list,
2649                                  fdir_rule_ptr,
2650                                  entries);
2651                 rte_free(fdir_rule_ptr);
2652         }
2653
2654         while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2655                 TAILQ_REMOVE(&filter_rss_list,
2656                                  rss_filter_ptr,
2657                                  entries);
2658                 rte_free(rss_filter_ptr);
2659         }
2660
2661         while ((txgbe_flow_mem_ptr = TAILQ_FIRST(&txgbe_flow_list))) {
2662                 TAILQ_REMOVE(&txgbe_flow_list,
2663                                  txgbe_flow_mem_ptr,
2664                                  entries);
2665                 rte_free(txgbe_flow_mem_ptr->flow);
2666                 rte_free(txgbe_flow_mem_ptr);
2667         }
2668 }
2669
2670 /**
2671  * Create a flow rule.
2672  * Theoretically one rule can match more than one filter type.
2673  * We let it use the first filter type it matches,
2674  * so the parsing order matters.
2675  */
2676 static struct rte_flow *
2677 txgbe_flow_create(struct rte_eth_dev *dev,
2678                   const struct rte_flow_attr *attr,
2679                   const struct rte_flow_item pattern[],
2680                   const struct rte_flow_action actions[],
2681                   struct rte_flow_error *error)
2682 {
2683         int ret;
2684         struct rte_eth_ntuple_filter ntuple_filter;
2685         struct rte_eth_ethertype_filter ethertype_filter;
2686         struct rte_eth_syn_filter syn_filter;
2687         struct txgbe_fdir_rule fdir_rule;
2688         struct txgbe_l2_tunnel_conf l2_tn_filter;
2689         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
2690         struct txgbe_rte_flow_rss_conf rss_conf;
2691         struct rte_flow *flow = NULL;
2692         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
2693         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
2694         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
2695         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2696         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
2697         struct txgbe_rss_conf_ele *rss_filter_ptr;
2698         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
2699         uint8_t first_mask = FALSE;
2700
2701         flow = rte_zmalloc("txgbe_rte_flow", sizeof(struct rte_flow), 0);
2702         if (!flow) {
2703                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2704                 return (struct rte_flow *)flow;
2705         }
2706         txgbe_flow_mem_ptr = rte_zmalloc("txgbe_flow_mem",
2707                         sizeof(struct txgbe_flow_mem), 0);
2708         if (!txgbe_flow_mem_ptr) {
2709                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2710                 rte_free(flow);
2711                 return NULL;
2712         }
2713         txgbe_flow_mem_ptr->flow = flow;
2714         TAILQ_INSERT_TAIL(&txgbe_flow_list,
2715                                 txgbe_flow_mem_ptr, entries);
2716
2717         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2718         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2719                         actions, &ntuple_filter, error);
2720
2721 #ifdef RTE_LIB_SECURITY
2722         /* An ESP flow is not really a flow */
2723         if (ntuple_filter.proto == IPPROTO_ESP)
2724                 return flow;
2725 #endif
2726
2727         if (!ret) {
2728                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2729                 if (!ret) {
2730                         ntuple_filter_ptr = rte_zmalloc("txgbe_ntuple_filter",
2731                                 sizeof(struct txgbe_ntuple_filter_ele), 0);
2732                         if (!ntuple_filter_ptr) {
2733                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2734                                 goto out;
2735                         }
2736                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2737                                 &ntuple_filter,
2738                                 sizeof(struct rte_eth_ntuple_filter));
2739                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2740                                 ntuple_filter_ptr, entries);
2741                         flow->rule = ntuple_filter_ptr;
2742                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2743                         return flow;
2744                 }
2745                 goto out;
2746         }
2747
2748         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2749         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2750                                 actions, &ethertype_filter, error);
2751         if (!ret) {
2752                 ret = txgbe_add_del_ethertype_filter(dev,
2753                                 &ethertype_filter, TRUE);
2754                 if (!ret) {
2755                         ethertype_filter_ptr =
2756                                 rte_zmalloc("txgbe_ethertype_filter",
2757                                 sizeof(struct txgbe_ethertype_filter_ele), 0);
2758                         if (!ethertype_filter_ptr) {
2759                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2760                                 goto out;
2761                         }
2762                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2763                                 &ethertype_filter,
2764                                 sizeof(struct rte_eth_ethertype_filter));
2765                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2766                                 ethertype_filter_ptr, entries);
2767                         flow->rule = ethertype_filter_ptr;
2768                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2769                         return flow;
2770                 }
2771                 goto out;
2772         }
2773
2774         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2775         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2776                                 actions, &syn_filter, error);
2777         if (!ret) {
2778                 ret = txgbe_syn_filter_set(dev, &syn_filter, TRUE);
2779                 if (!ret) {
2780                         syn_filter_ptr = rte_zmalloc("txgbe_syn_filter",
2781                                 sizeof(struct txgbe_eth_syn_filter_ele), 0);
2782                         if (!syn_filter_ptr) {
2783                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2784                                 goto out;
2785                         }
2786                         rte_memcpy(&syn_filter_ptr->filter_info,
2787                                 &syn_filter,
2788                                 sizeof(struct rte_eth_syn_filter));
2789                         TAILQ_INSERT_TAIL(&filter_syn_list,
2790                                 syn_filter_ptr,
2791                                 entries);
2792                         flow->rule = syn_filter_ptr;
2793                         flow->filter_type = RTE_ETH_FILTER_SYN;
2794                         return flow;
2795                 }
2796                 goto out;
2797         }
2798
2799         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2800         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2801                                 actions, &fdir_rule, error);
2802         if (!ret) {
2803                 /* A mask cannot be deleted. */
2804                 if (fdir_rule.b_mask) {
2805                         if (!fdir_info->mask_added) {
2806                                 /* It's the first time the mask is set. */
2807                                 rte_memcpy(&fdir_info->mask,
2808                                         &fdir_rule.mask,
2809                                         sizeof(struct txgbe_hw_fdir_mask));
2810                                 fdir_info->flex_bytes_offset =
2811                                         fdir_rule.flex_bytes_offset;
2812
2813                                 if (fdir_rule.mask.flex_bytes_mask)
2814                                         txgbe_fdir_set_flexbytes_offset(dev,
2815                                                 fdir_rule.flex_bytes_offset);
2816
2817                                 ret = txgbe_fdir_set_input_mask(dev);
2818                                 if (ret)
2819                                         goto out;
2820
2821                                 fdir_info->mask_added = TRUE;
2822                                 first_mask = TRUE;
2823                         } else {
2824                                 /**
2825                                  * Only one global mask is supported;
2826                                  * all the masks must be the same.
2827                                  */
2828                                 ret = memcmp(&fdir_info->mask,
2829                                         &fdir_rule.mask,
2830                                         sizeof(struct txgbe_hw_fdir_mask));
2831                                 if (ret)
2832                                         goto out;
2833
2834                                 if (fdir_info->flex_bytes_offset !=
2835                                                 fdir_rule.flex_bytes_offset)
2836                                         goto out;
2837                         }
2838                 }
2839
2840                 if (fdir_rule.b_spec) {
2841                         ret = txgbe_fdir_filter_program(dev, &fdir_rule,
2842                                         FALSE, FALSE);
2843                         if (!ret) {
2844                                 fdir_rule_ptr = rte_zmalloc("txgbe_fdir_filter",
2845                                         sizeof(struct txgbe_fdir_rule_ele), 0);
2846                                 if (!fdir_rule_ptr) {
2847                                         PMD_DRV_LOG(ERR,
2848                                                 "failed to allocate memory");
2849                                         goto out;
2850                                 }
2851                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2852                                         &fdir_rule,
2853                                         sizeof(struct txgbe_fdir_rule));
2854                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2855                                         fdir_rule_ptr, entries);
2856                                 flow->rule = fdir_rule_ptr;
2857                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2858
2859                                 return flow;
2860                         }
2861
2862                         if (ret) {
2863                                 /**
2864                                  * Clear the mask_added flag if
2865                                  * programming the filter fails.
2866                                  */
2867                                 if (first_mask)
2868                                         fdir_info->mask_added = FALSE;
2869                                 goto out;
2870                         }
2871                 }
2872
2873                 goto out;
2874         }
2875
2876         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2877         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2878                                         actions, &l2_tn_filter, error);
2879         if (!ret) {
2880                 ret = txgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2881                 if (!ret) {
2882                         l2_tn_filter_ptr = rte_zmalloc("txgbe_l2_tn_filter",
2883                                 sizeof(struct txgbe_eth_l2_tunnel_conf_ele), 0);
2884                         if (!l2_tn_filter_ptr) {
2885                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2886                                 goto out;
2887                         }
2888                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
2889                                 &l2_tn_filter,
2890                                 sizeof(struct txgbe_l2_tunnel_conf));
2891                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2892                                 l2_tn_filter_ptr, entries);
2893                         flow->rule = l2_tn_filter_ptr;
2894                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2895                         return flow;
2896                 }
2897         }
2898
2899         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2900         ret = txgbe_parse_rss_filter(dev, attr,
2901                                         actions, &rss_conf, error);
2902         if (!ret) {
2903                 ret = txgbe_config_rss_filter(dev, &rss_conf, TRUE);
2904                 if (!ret) {
2905                         rss_filter_ptr = rte_zmalloc("txgbe_rss_filter",
2906                                 sizeof(struct txgbe_rss_conf_ele), 0);
2907                         if (!rss_filter_ptr) {
2908                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2909                                 goto out;
2910                         }
2911                         txgbe_rss_conf_init(&rss_filter_ptr->filter_info,
2912                                             &rss_conf.conf);
2913                         TAILQ_INSERT_TAIL(&filter_rss_list,
2914                                 rss_filter_ptr, entries);
2915                         flow->rule = rss_filter_ptr;
2916                         flow->filter_type = RTE_ETH_FILTER_HASH;
2917                         return flow;
2918                 }
2919         }
2920
2921 out:
2922         TAILQ_REMOVE(&txgbe_flow_list,
2923                 txgbe_flow_mem_ptr, entries);
2924         rte_flow_error_set(error, -ret,
2925                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2926                            "Failed to create flow.");
2927         rte_free(txgbe_flow_mem_ptr);
2928         rte_free(flow);
2929         return NULL;
2930 }
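
/*
 * Illustrative call sequence from an application (a sketch only; port_id,
 * attr, pattern and actions are assumed to have been set up as in the
 * examples above). On failure the create path above returns NULL and fills
 * the rte_flow_error structure.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow *f;
 *
 *     f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (f == NULL)
 *             printf("flow creation failed: %s\n",
 *                    err.message ? err.message : "(no message)");
 */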
2931
2932 /**
2933  * Check if the flow rule is supported by txgbe.
2934  * It only checks the format. It does not guarantee that the rule can be
2935  * programmed into the HW, because there may not be enough room for it.
2936  */
2937 static int
2938 txgbe_flow_validate(struct rte_eth_dev *dev,
2939                 const struct rte_flow_attr *attr,
2940                 const struct rte_flow_item pattern[],
2941                 const struct rte_flow_action actions[],
2942                 struct rte_flow_error *error)
2943 {
2944         struct rte_eth_ntuple_filter ntuple_filter;
2945         struct rte_eth_ethertype_filter ethertype_filter;
2946         struct rte_eth_syn_filter syn_filter;
2947         struct txgbe_l2_tunnel_conf l2_tn_filter;
2948         struct txgbe_fdir_rule fdir_rule;
2949         struct txgbe_rte_flow_rss_conf rss_conf;
2950         int ret = 0;
2951
2952         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2953         ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
2954                                 actions, &ntuple_filter, error);
2955         if (!ret)
2956                 return 0;
2957
2958         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2959         ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
2960                                 actions, &ethertype_filter, error);
2961         if (!ret)
2962                 return 0;
2963
2964         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2965         ret = txgbe_parse_syn_filter(dev, attr, pattern,
2966                                 actions, &syn_filter, error);
2967         if (!ret)
2968                 return 0;
2969
2970         memset(&fdir_rule, 0, sizeof(struct txgbe_fdir_rule));
2971         ret = txgbe_parse_fdir_filter(dev, attr, pattern,
2972                                 actions, &fdir_rule, error);
2973         if (!ret)
2974                 return 0;
2975
2976         memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
2977         ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
2978                                 actions, &l2_tn_filter, error);
2979         if (!ret)
2980                 return 0;
2981
2982         memset(&rss_conf, 0, sizeof(struct txgbe_rte_flow_rss_conf));
2983         ret = txgbe_parse_rss_filter(dev, attr,
2984                                         actions, &rss_conf, error);
2985
2986         return ret;
2987 }
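
/*
 * Illustrative usage (a sketch only, reusing err and f from the sketch
 * above): rte_flow_validate() dispatches to the function above, so a
 * successful return means the rule format is understood, not that
 * rte_flow_create() will also succeed once hardware resources are taken
 * into account.
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *             f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */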
2988
2989 /* Destroy a flow rule on txgbe. */
2990 static int
2991 txgbe_flow_destroy(struct rte_eth_dev *dev,
2992                 struct rte_flow *flow,
2993                 struct rte_flow_error *error)
2994 {
2995         int ret = 0;
2996         struct rte_flow *pmd_flow = flow;
2997         enum rte_filter_type filter_type = pmd_flow->filter_type;
2998         struct rte_eth_ntuple_filter ntuple_filter;
2999         struct rte_eth_ethertype_filter ethertype_filter;
3000         struct rte_eth_syn_filter syn_filter;
3001         struct txgbe_fdir_rule fdir_rule;
3002         struct txgbe_l2_tunnel_conf l2_tn_filter;
3003         struct txgbe_ntuple_filter_ele *ntuple_filter_ptr;
3004         struct txgbe_ethertype_filter_ele *ethertype_filter_ptr;
3005         struct txgbe_eth_syn_filter_ele *syn_filter_ptr;
3006         struct txgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3007         struct txgbe_fdir_rule_ele *fdir_rule_ptr;
3008         struct txgbe_flow_mem *txgbe_flow_mem_ptr;
3009         struct txgbe_hw_fdir_info *fdir_info = TXGBE_DEV_FDIR(dev);
3010         struct txgbe_rss_conf_ele *rss_filter_ptr;
3011
3012         switch (filter_type) {
3013         case RTE_ETH_FILTER_NTUPLE:
3014                 ntuple_filter_ptr = (struct txgbe_ntuple_filter_ele *)
3015                                         pmd_flow->rule;
3016                 rte_memcpy(&ntuple_filter,
3017                         &ntuple_filter_ptr->filter_info,
3018                         sizeof(struct rte_eth_ntuple_filter));
3019                 ret = txgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3020                 if (!ret) {
3021                         TAILQ_REMOVE(&filter_ntuple_list,
3022                         ntuple_filter_ptr, entries);
3023                         rte_free(ntuple_filter_ptr);
3024                 }
3025                 break;
3026         case RTE_ETH_FILTER_ETHERTYPE:
3027                 ethertype_filter_ptr = (struct txgbe_ethertype_filter_ele *)
3028                                         pmd_flow->rule;
3029                 rte_memcpy(&ethertype_filter,
3030                         &ethertype_filter_ptr->filter_info,
3031                         sizeof(struct rte_eth_ethertype_filter));
3032                 ret = txgbe_add_del_ethertype_filter(dev,
3033                                 &ethertype_filter, FALSE);
3034                 if (!ret) {
3035                         TAILQ_REMOVE(&filter_ethertype_list,
3036                                 ethertype_filter_ptr, entries);
3037                         rte_free(ethertype_filter_ptr);
3038                 }
3039                 break;
3040         case RTE_ETH_FILTER_SYN:
3041                 syn_filter_ptr = (struct txgbe_eth_syn_filter_ele *)
3042                                 pmd_flow->rule;
3043                 rte_memcpy(&syn_filter,
3044                         &syn_filter_ptr->filter_info,
3045                         sizeof(struct rte_eth_syn_filter));
3046                 ret = txgbe_syn_filter_set(dev, &syn_filter, FALSE);
3047                 if (!ret) {
3048                         TAILQ_REMOVE(&filter_syn_list,
3049                                 syn_filter_ptr, entries);
3050                         rte_free(syn_filter_ptr);
3051                 }
3052                 break;
3053         case RTE_ETH_FILTER_FDIR:
3054                 fdir_rule_ptr = (struct txgbe_fdir_rule_ele *)pmd_flow->rule;
3055                 rte_memcpy(&fdir_rule,
3056                         &fdir_rule_ptr->filter_info,
3057                         sizeof(struct txgbe_fdir_rule));
3058                 ret = txgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3059                 if (!ret) {
3060                         TAILQ_REMOVE(&filter_fdir_list,
3061                                 fdir_rule_ptr, entries);
3062                         rte_free(fdir_rule_ptr);
3063                         if (TAILQ_EMPTY(&filter_fdir_list))
3064                                 fdir_info->mask_added = false;
3065                 }
3066                 break;
3067         case RTE_ETH_FILTER_L2_TUNNEL:
3068                 l2_tn_filter_ptr = (struct txgbe_eth_l2_tunnel_conf_ele *)
3069                                 pmd_flow->rule;
3070                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3071                         sizeof(struct txgbe_l2_tunnel_conf));
3072                 ret = txgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3073                 if (!ret) {
3074                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3075                                 l2_tn_filter_ptr, entries);
3076                         rte_free(l2_tn_filter_ptr);
3077                 }
3078                 break;
3079         case RTE_ETH_FILTER_HASH:
3080                 rss_filter_ptr = (struct txgbe_rss_conf_ele *)
3081                                 pmd_flow->rule;
3082                 ret = txgbe_config_rss_filter(dev,
3083                                         &rss_filter_ptr->filter_info, FALSE);
3084                 if (!ret) {
3085                         TAILQ_REMOVE(&filter_rss_list,
3086                                 rss_filter_ptr, entries);
3087                         rte_free(rss_filter_ptr);
3088                 }
3089                 break;
3090         default:
3091                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3092                             filter_type);
3093                 ret = -EINVAL;
3094                 break;
3095         }
3096
3097         if (ret) {
3098                 rte_flow_error_set(error, EINVAL,
3099                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3100                                 NULL, "Failed to destroy flow");
3101                 return ret;
3102         }
3103
3104         TAILQ_FOREACH(txgbe_flow_mem_ptr, &txgbe_flow_list, entries) {
3105                 if (txgbe_flow_mem_ptr->flow == pmd_flow) {
3106                         TAILQ_REMOVE(&txgbe_flow_list,
3107                                 txgbe_flow_mem_ptr, entries);
3108                         rte_free(txgbe_flow_mem_ptr);
3109                 }
3110         }
3111         rte_free(flow);
3112
3113         return ret;
3114 }
3115
3116 /* Destroy all flow rules associated with a port on txgbe. */
3117 static int
3118 txgbe_flow_flush(struct rte_eth_dev *dev,
3119                 struct rte_flow_error *error)
3120 {
3121         int ret = 0;
3122
3123         txgbe_clear_all_ntuple_filter(dev);
3124         txgbe_clear_all_ethertype_filter(dev);
3125         txgbe_clear_syn_filter(dev);
3126
3127         ret = txgbe_clear_all_fdir_filter(dev);
3128         if (ret < 0) {
3129                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3130                                         NULL, "Failed to flush rule");
3131                 return ret;
3132         }
3133
3134         ret = txgbe_clear_all_l2_tn_filter(dev);
3135         if (ret < 0) {
3136                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3137                                         NULL, "Failed to flush rule");
3138                 return ret;
3139         }
3140
3141         txgbe_clear_rss_filter(dev);
3142
3143         txgbe_filterlist_flush();
3144
3145         return 0;
3146 }
3147
3148 const struct rte_flow_ops txgbe_flow_ops = {
3149         .validate = txgbe_flow_validate,
3150         .create = txgbe_flow_create,
3151         .destroy = txgbe_flow_destroy,
3152         .flush = txgbe_flow_flush,
3153 };
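
/*
 * This ops table is what the rte_flow API dispatches to for txgbe ports once
 * the ethdev layer has retrieved it from the driver. A rough sketch of what
 * such a retrieval hook can look like (the callback name and exact mechanism
 * here are illustrative assumptions, not taken from this file):
 *
 *     static int
 *     txgbe_dev_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
 *                            const struct rte_flow_ops **ops)
 *     {
 *             *ops = &txgbe_flow_ops;
 *             return 0;
 *     }
 */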
3154