net/ixgbe: fix parsing FDIR NVGRE issue
[dpdk.git] / drivers / net / ixgbe / ixgbe_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"


#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;

/**
 * An endless loop cannot happen under the following assumptions:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}

/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe packets,
 * the packets normally use network order.
 */

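/*
 * For illustration only (an editor's sketch, example values are assumed):
 * a spec carried in an rte_flow_item uses network byte order, e.g.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *
 * while rte_flow_attr fields such as .priority, and the queue index in
 * struct rte_flow_action_queue, stay in CPU byte order.
 */
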
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
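/*
 * For illustration only (an editor's sketch, not part of the driver;
 * addresses, ports and the queue index are assumed example values):
 * a pattern/action list of the shape described above, as an application
 * might build it before calling rte_flow_create().
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0xC0A80114),	// 192.168.1.20
 *		.dst_addr = rte_cpu_to_be_32(0xC0A70332),	// 192.167.3.50
 *		.next_proto_id = IPPROTO_UDP, } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(UINT32_MAX),
 *		.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *		.next_proto_id = 0xFF, } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80), } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(UINT16_MAX),
 *		.dst_port = rte_cpu_to_be_16(UINT16_MAX), } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */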
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

#ifdef RTE_LIBRTE_SECURITY
	/**
	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern*/
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = (const struct rte_flow_item_eth *)item->spec;
		eth_mask = (const struct rte_flow_item_eth *)item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
			  EINVAL,
			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			  item, "Not supported last point for range");
			return -rte_errno;

		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or Vlan */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			  item, "Not supported by ntuple filter");
			  return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
			  EINVAL,
			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			  item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {

			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			  item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
		/**
		 * Only support src & dst addresses, protocol,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto  = ipv4_spec->hdr.next_proto_id;
	}

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
		(!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;

	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
		filter->src_port_mask  = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
	    filter->priority = 1;

	return 0;
}

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* ESP flow not really a flow*/
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and fill in the ethertype filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
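/*
 * For illustration only (an editor's sketch, not part of the driver; the
 * ethertype and queue index are assumed example values): a rule of the
 * shape described above, matching ethertype 0x0807 and steering it to
 * queue 2.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = rte_cpu_to_be_16(UINT16_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */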
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule,
 * and fill in the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
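/*
 * For illustration only (an editor's sketch, not part of the driver; the
 * queue index is an assumed example value): a TCP SYN rule of the shape
 * this parser accepts, directing SYN packets to queue 5.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 5 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };	// priority 0 = low
 */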
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}


	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule,
 * and fill in the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item must be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x1     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
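/*
 * For illustration only (an editor's sketch, not part of the driver; GRP,
 * e_cid_base and the VF id are assumed example values): an E-tag rule of
 * the shape described above, forwarding to VF 1.  GRP and E-CID base are
 * packed into rsvd_grp_ecid_b as rsvd(2b):grp(2b):e_cid_base(12b).
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1 };
 */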
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
			act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}

/* Parse to get the attr and action info of flow director rule. */
1320 static int
1321 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1322                           const struct rte_flow_action actions[],
1323                           struct ixgbe_fdir_rule *rule,
1324                           struct rte_flow_error *error)
1325 {
1326         const struct rte_flow_action *act;
1327         const struct rte_flow_action_queue *act_q;
1328         const struct rte_flow_action_mark *mark;
1329
1330         /* parse attr */
1331         /* must be input direction */
1332         if (!attr->ingress) {
1333                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1334                 rte_flow_error_set(error, EINVAL,
1335                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1336                         attr, "Only support ingress.");
1337                 return -rte_errno;
1338         }
1339
1340         /* not supported */
1341         if (attr->egress) {
1342                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1343                 rte_flow_error_set(error, EINVAL,
1344                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1345                         attr, "Not support egress.");
1346                 return -rte_errno;
1347         }
1348
1349         /* not supported */
1350         if (attr->priority) {
1351                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1352                 rte_flow_error_set(error, EINVAL,
1353                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1354                         attr, "Not support priority.");
1355                 return -rte_errno;
1356         }
1357
1358         /* check if the first not void action is QUEUE or DROP. */
1359         act = next_no_void_action(actions, NULL);
1360         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1361             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1362                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1363                 rte_flow_error_set(error, EINVAL,
1364                         RTE_FLOW_ERROR_TYPE_ACTION,
1365                         act, "Not supported action.");
1366                 return -rte_errno;
1367         }
1368
1369         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1370                 act_q = (const struct rte_flow_action_queue *)act->conf;
1371                 rule->queue = act_q->index;
1372         } else { /* drop */
1373                 /* signature mode does not support drop action. */
1374                 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1375                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1376                         rte_flow_error_set(error, EINVAL,
1377                                 RTE_FLOW_ERROR_TYPE_ACTION,
1378                                 act, "Not supported action.");
1379                         return -rte_errno;
1380                 }
1381                 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1382         }
1383
1384         /* check if the next not void action is MARK */
1385         act = next_no_void_action(actions, act);
1386         if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1387                 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1388                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1389                 rte_flow_error_set(error, EINVAL,
1390                         RTE_FLOW_ERROR_TYPE_ACTION,
1391                         act, "Not supported action.");
1392                 return -rte_errno;
1393         }
1394
1395         rule->soft_id = 0;
1396
1397         if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1398                 mark = (const struct rte_flow_action_mark *)act->conf;
1399                 rule->soft_id = mark->id;
1400                 act = next_no_void_action(actions, act);
1401         }
1402
1403         /* check if the next not void action is END */
1404         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1405                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1406                 rte_flow_error_set(error, EINVAL,
1407                         RTE_FLOW_ERROR_TYPE_ACTION,
1408                         act, "Not supported action.");
1409                 return -rte_errno;
1410         }
1411
1412         return 0;
1413 }
1414
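/*
 * Illustrative sketch (not part of the driver): an action list accepted by
 * ixgbe_parse_fdir_act_attr() above, steering matches to queue 3 and
 * tagging them with MARK id 0x1234. Variable names are hypothetical and
 * rte_flow.h is assumed to be included.
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * A DROP action in place of QUEUE sets rule->fdirflags to IXGBE_FDIRCMD_DROP
 * and is rejected in signature mode, as the parser above shows.
 */
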
1415 /* Search the next not void pattern, skipping fuzzy items. */
1416 static inline
1417 const struct rte_flow_item *next_no_fuzzy_pattern(
1418                 const struct rte_flow_item pattern[],
1419                 const struct rte_flow_item *cur)
1420 {
1421         const struct rte_flow_item *next =
1422                 next_no_void_pattern(pattern, cur);
1423         while (1) {
1424                 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1425                         return next;
1426                 next = next_no_void_pattern(pattern, next);
1427         }
1428 }
1429
1430 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1431 {
1432         const struct rte_flow_item_fuzzy *spec, *last, *mask;
1433         const struct rte_flow_item *item;
1434         uint32_t sh, lh, mh;
1435         int i = 0;
1436
1437         while (1) {
1438                 item = pattern + i;
1439                 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1440                         break;
1441
1442                 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1443                         spec =
1444                         (const struct rte_flow_item_fuzzy *)item->spec;
1445                         last =
1446                         (const struct rte_flow_item_fuzzy *)item->last;
1447                         mask =
1448                         (const struct rte_flow_item_fuzzy *)item->mask;
1449
1450                         if (!spec || !mask)
1451                                 return 0;
1452
1453                         sh = spec->thresh;
1454
1455                         if (!last)
1456                                 lh = sh;
1457                         else
1458                                 lh = last->thresh;
1459
1460                         mh = mask->thresh;
1461                         sh = sh & mh;
1462                         lh = lh & mh;
1463
1464                         if (!sh || sh > lh)
1465                                 return 0;
1466
1467                         return 1;
1468                 }
1469
1470                 i++;
1471         }
1472
1473         return 0;
1474 }
1475
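/*
 * Illustrative sketch (not part of the driver): a FUZZY item that makes
 * signature_match() above return 1, i.e. requests signature mode instead of
 * perfect mode. Variable names are hypothetical and rte_flow.h is assumed
 * to be included.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 *
 * The item may appear anywhere before END; next_no_fuzzy_pattern() skips it
 * while walking the rest of the pattern.
 */
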
1476 /**
1477  * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1478  * and get the flow director filter info along the way.
1479  * UDP/TCP/SCTP PATTERN:
1480  * The first not void item can be ETH or IPV4 or IPV6
1481  * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1482  * The next not void item could be UDP or TCP or SCTP (optional)
1483  * The next not void item could be RAW (for flexbyte, optional)
1484  * The next not void item must be END.
1485  * A Fuzzy Match pattern can appear at any place before END.
1486  * Fuzzy Match is optional for IPV4 but is required for IPV6
1487  * MAC VLAN PATTERN:
1488  * The first not void item must be ETH.
1489  * The second not void item must be MAC VLAN.
1490  * The next not void item must be END.
1491  * ACTION:
1492  * The first not void action should be QUEUE or DROP.
1493  * The second not void optional action should be MARK,
1494  * mark_id is a uint32_t number.
1495  * The next not void action should be END.
1496  * UDP/TCP/SCTP pattern example:
1497  * ITEM         Spec                    Mask
1498  * ETH          NULL                    NULL
1499  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
1500  *              dst_addr 192.167.3.50   0xFFFFFFFF
1501  * UDP/TCP/SCTP src_port        80      0xFFFF
1502  *              dst_port        80      0xFFFF
1503  * FLEX relative        0       0x1
1504  *              search          0       0x1
1505  *              reserved        0       0
1506  *              offset          12      0xFFFFFFFF
1507  *              limit           0       0xFFFF
1508  *              length          2       0xFFFF
1509  *              pattern[0]      0x86    0xFF
1510  *              pattern[1]      0xDD    0xFF
1511  * END
1512  * MAC VLAN pattern example:
1513  * ITEM         Spec                    Mask
1514  * ETH          dst_addr
1515  *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
1516  *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
1517  * MAC VLAN     tci     0x2016          0xEFFF
1518  * END
1519  * Other members in mask and spec should be set to 0x00.
1520  * Item->last should be NULL.
1521  */
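/*
 * Illustrative sketch (not part of the driver): the UDP example above built
 * as rte_flow arrays, matching 192.168.1.20 -> 192.167.3.50 with source and
 * destination port 80. Values are in network byte order; variable names are
 * hypothetical and rte_flow.h is assumed to be included. The optional RAW
 * (flexbyte) item is omitted here; the action list is sketched above, after
 * ixgbe_parse_fdir_act_attr().
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *		.hdr.dst_addr = UINT32_MAX,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = UINT16_MAX,
 *		.hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
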
1522 static int
1523 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1524                                const struct rte_flow_attr *attr,
1525                                const struct rte_flow_item pattern[],
1526                                const struct rte_flow_action actions[],
1527                                struct ixgbe_fdir_rule *rule,
1528                                struct rte_flow_error *error)
1529 {
1530         const struct rte_flow_item *item;
1531         const struct rte_flow_item_eth *eth_spec;
1532         const struct rte_flow_item_eth *eth_mask;
1533         const struct rte_flow_item_ipv4 *ipv4_spec;
1534         const struct rte_flow_item_ipv4 *ipv4_mask;
1535         const struct rte_flow_item_ipv6 *ipv6_spec;
1536         const struct rte_flow_item_ipv6 *ipv6_mask;
1537         const struct rte_flow_item_tcp *tcp_spec;
1538         const struct rte_flow_item_tcp *tcp_mask;
1539         const struct rte_flow_item_udp *udp_spec;
1540         const struct rte_flow_item_udp *udp_mask;
1541         const struct rte_flow_item_sctp *sctp_spec;
1542         const struct rte_flow_item_sctp *sctp_mask;
1543         const struct rte_flow_item_vlan *vlan_spec;
1544         const struct rte_flow_item_vlan *vlan_mask;
1545         const struct rte_flow_item_raw *raw_mask;
1546         const struct rte_flow_item_raw *raw_spec;
1547         uint8_t j;
1548
1549         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1550
1551         if (!pattern) {
1552                 rte_flow_error_set(error, EINVAL,
1553                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1554                         NULL, "NULL pattern.");
1555                 return -rte_errno;
1556         }
1557
1558         if (!actions) {
1559                 rte_flow_error_set(error, EINVAL,
1560                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1561                                    NULL, "NULL action.");
1562                 return -rte_errno;
1563         }
1564
1565         if (!attr) {
1566                 rte_flow_error_set(error, EINVAL,
1567                                    RTE_FLOW_ERROR_TYPE_ATTR,
1568                                    NULL, "NULL attribute.");
1569                 return -rte_errno;
1570         }
1571
1572         /**
1573          * Some fields may not be provided. Set spec to 0 and mask to default
1574          * value, so the fields that are not provided need no handling later.
1575          */
1576         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1577         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1578         rule->mask.vlan_tci_mask = 0;
1579         rule->mask.flex_bytes_mask = 0;
1580
1581         /**
1582          * The first not void item should be
1583          * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1584          */
1585         item = next_no_fuzzy_pattern(pattern, NULL);
1586         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1587             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1588             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1589             item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1590             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1591             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1592                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1593                 rte_flow_error_set(error, EINVAL,
1594                         RTE_FLOW_ERROR_TYPE_ITEM,
1595                         item, "Not supported by fdir filter");
1596                 return -rte_errno;
1597         }
1598
1599         if (signature_match(pattern))
1600                 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1601         else
1602                 rule->mode = RTE_FDIR_MODE_PERFECT;
1603
1604         /*Not supported last point for range*/
1605         if (item->last) {
1606                 rte_flow_error_set(error, EINVAL,
1607                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1608                         item, "Not supported last point for range");
1609                 return -rte_errno;
1610         }
1611
1612         /* Get the MAC info. */
1613         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1614                 /**
1615                  * Only support vlan and dst MAC address,
1616                  * others should be masked.
1617                  */
1618                 if (item->spec && !item->mask) {
1619                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1620                         rte_flow_error_set(error, EINVAL,
1621                                 RTE_FLOW_ERROR_TYPE_ITEM,
1622                                 item, "Not supported by fdir filter");
1623                         return -rte_errno;
1624                 }
1625
1626                 if (item->spec) {
1627                         rule->b_spec = TRUE;
1628                         eth_spec = (const struct rte_flow_item_eth *)item->spec;
1629
1630                         /* Get the dst MAC. */
1631                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1632                                 rule->ixgbe_fdir.formatted.inner_mac[j] =
1633                                         eth_spec->dst.addr_bytes[j];
1634                         }
1635                 }
1636
1637
1638                 if (item->mask) {
1639
1640                         rule->b_mask = TRUE;
1641                         eth_mask = (const struct rte_flow_item_eth *)item->mask;
1642
1643                         /* Ether type should be masked. */
1644                         if (eth_mask->type ||
1645                             rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1646                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1647                                 rte_flow_error_set(error, EINVAL,
1648                                         RTE_FLOW_ERROR_TYPE_ITEM,
1649                                         item, "Not supported by fdir filter");
1650                                 return -rte_errno;
1651                         }
1652
1653                 /* If the Ethernet mask is meaningful, it means MAC VLAN mode. */
1654                         rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1655
1656                         /**
1657                          * src MAC address must be masked,
1658                          * and don't support dst MAC address mask.
1659                          */
1660                         for (j = 0; j < ETHER_ADDR_LEN; j++) {
1661                                 if (eth_mask->src.addr_bytes[j] ||
1662                                         eth_mask->dst.addr_bytes[j] != 0xFF) {
1663                                         memset(rule, 0,
1664                                         sizeof(struct ixgbe_fdir_rule));
1665                                         rte_flow_error_set(error, EINVAL,
1666                                         RTE_FLOW_ERROR_TYPE_ITEM,
1667                                         item, "Not supported by fdir filter");
1668                                         return -rte_errno;
1669                                 }
1670                         }
1671
1672                 /* When there is no VLAN, treat it as a full mask. */
1673                         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1674                 }
1675                 /** If both spec and mask are NULL,
1676                  * it means we don't care about ETH.
1677                  * Do nothing.
1678                  */
1679
1680                 /**
1681                  * Check if the next not void item is vlan or ipv4.
1682                  * IPv6 is not supported.
1683                  */
1684                 item = next_no_fuzzy_pattern(pattern, item);
1685                 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1686                         if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1687                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1688                                 rte_flow_error_set(error, EINVAL,
1689                                         RTE_FLOW_ERROR_TYPE_ITEM,
1690                                         item, "Not supported by fdir filter");
1691                                 return -rte_errno;
1692                         }
1693                 } else {
1694                         if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1695                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1696                                 rte_flow_error_set(error, EINVAL,
1697                                         RTE_FLOW_ERROR_TYPE_ITEM,
1698                                         item, "Not supported by fdir filter");
1699                                 return -rte_errno;
1700                         }
1701                 }
1702         }
1703
1704         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1705                 if (!(item->spec && item->mask)) {
1706                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1707                         rte_flow_error_set(error, EINVAL,
1708                                 RTE_FLOW_ERROR_TYPE_ITEM,
1709                                 item, "Not supported by fdir filter");
1710                         return -rte_errno;
1711                 }
1712
1713                 /*Not supported last point for range*/
1714                 if (item->last) {
1715                         rte_flow_error_set(error, EINVAL,
1716                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1717                                 item, "Not supported last point for range");
1718                         return -rte_errno;
1719                 }
1720
1721                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1722                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1723
1724                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1725
1726                 rule->mask.vlan_tci_mask = vlan_mask->tci;
1727                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1728                 /* More than one tag is not supported. */
1729
1730                 /* Next not void item must be END */
1731                 item = next_no_fuzzy_pattern(pattern, item);
1732                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1733                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1734                         rte_flow_error_set(error, EINVAL,
1735                                 RTE_FLOW_ERROR_TYPE_ITEM,
1736                                 item, "Not supported by fdir filter");
1737                         return -rte_errno;
1738                 }
1739         }
1740
1741         /* Get the IPV4 info. */
1742         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1743                 /**
1744                  * Set the flow type even if there's no content
1745                  * as we must have a flow type.
1746                  */
1747                 rule->ixgbe_fdir.formatted.flow_type =
1748                         IXGBE_ATR_FLOW_TYPE_IPV4;
1749                 /*Not supported last point for range*/
1750                 if (item->last) {
1751                         rte_flow_error_set(error, EINVAL,
1752                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1753                                 item, "Not supported last point for range");
1754                         return -rte_errno;
1755                 }
1756                 /**
1757                  * Only care about src & dst addresses,
1758                  * others should be masked.
1759                  */
1760                 if (!item->mask) {
1761                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1762                         rte_flow_error_set(error, EINVAL,
1763                                 RTE_FLOW_ERROR_TYPE_ITEM,
1764                                 item, "Not supported by fdir filter");
1765                         return -rte_errno;
1766                 }
1767                 rule->b_mask = TRUE;
1768                 ipv4_mask =
1769                         (const struct rte_flow_item_ipv4 *)item->mask;
1770                 if (ipv4_mask->hdr.version_ihl ||
1771                     ipv4_mask->hdr.type_of_service ||
1772                     ipv4_mask->hdr.total_length ||
1773                     ipv4_mask->hdr.packet_id ||
1774                     ipv4_mask->hdr.fragment_offset ||
1775                     ipv4_mask->hdr.time_to_live ||
1776                     ipv4_mask->hdr.next_proto_id ||
1777                     ipv4_mask->hdr.hdr_checksum) {
1778                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1779                         rte_flow_error_set(error, EINVAL,
1780                                 RTE_FLOW_ERROR_TYPE_ITEM,
1781                                 item, "Not supported by fdir filter");
1782                         return -rte_errno;
1783                 }
1784                 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1785                 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1786
1787                 if (item->spec) {
1788                         rule->b_spec = TRUE;
1789                         ipv4_spec =
1790                                 (const struct rte_flow_item_ipv4 *)item->spec;
1791                         rule->ixgbe_fdir.formatted.dst_ip[0] =
1792                                 ipv4_spec->hdr.dst_addr;
1793                         rule->ixgbe_fdir.formatted.src_ip[0] =
1794                                 ipv4_spec->hdr.src_addr;
1795                 }
1796
1797                 /**
1798                  * Check if the next not void item is
1799                  * TCP or UDP or SCTP or END.
1800                  */
1801                 item = next_no_fuzzy_pattern(pattern, item);
1802                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1803                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1804                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1805                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1806                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1807                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1808                         rte_flow_error_set(error, EINVAL,
1809                                 RTE_FLOW_ERROR_TYPE_ITEM,
1810                                 item, "Not supported by fdir filter");
1811                         return -rte_errno;
1812                 }
1813         }
1814
1815         /* Get the IPV6 info. */
1816         if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1817                 /**
1818                  * Set the flow type even if there's no content
1819                  * as we must have a flow type.
1820                  */
1821                 rule->ixgbe_fdir.formatted.flow_type =
1822                         IXGBE_ATR_FLOW_TYPE_IPV6;
1823
1824                 /**
1825                  * 1. must be a signature match
1826                  * 2. item->last is not supported
1827                  * 3. mask must not be NULL
1828                  */
1829                 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1830                     item->last ||
1831                     !item->mask) {
1832                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1833                         rte_flow_error_set(error, EINVAL,
1834                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1835                                 item, "Not supported last point for range");
1836                         return -rte_errno;
1837                 }
1838
1839                 rule->b_mask = TRUE;
1840                 ipv6_mask =
1841                         (const struct rte_flow_item_ipv6 *)item->mask;
1842                 if (ipv6_mask->hdr.vtc_flow ||
1843                     ipv6_mask->hdr.payload_len ||
1844                     ipv6_mask->hdr.proto ||
1845                     ipv6_mask->hdr.hop_limits) {
1846                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847                         rte_flow_error_set(error, EINVAL,
1848                                 RTE_FLOW_ERROR_TYPE_ITEM,
1849                                 item, "Not supported by fdir filter");
1850                         return -rte_errno;
1851                 }
1852
1853                 /* check src addr mask */
1854                 for (j = 0; j < 16; j++) {
1855                         if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1856                                 rule->mask.src_ipv6_mask |= 1 << j;
1857                         } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1858                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1859                                 rte_flow_error_set(error, EINVAL,
1860                                         RTE_FLOW_ERROR_TYPE_ITEM,
1861                                         item, "Not supported by fdir filter");
1862                                 return -rte_errno;
1863                         }
1864                 }
1865
1866                 /* check dst addr mask */
1867                 for (j = 0; j < 16; j++) {
1868                         if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1869                                 rule->mask.dst_ipv6_mask |= 1 << j;
1870                         } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1871                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1872                                 rte_flow_error_set(error, EINVAL,
1873                                         RTE_FLOW_ERROR_TYPE_ITEM,
1874                                         item, "Not supported by fdir filter");
1875                                 return -rte_errno;
1876                         }
1877                 }
1878
1879                 if (item->spec) {
1880                         rule->b_spec = TRUE;
1881                         ipv6_spec =
1882                                 (const struct rte_flow_item_ipv6 *)item->spec;
1883                         rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1884                                    ipv6_spec->hdr.src_addr, 16);
1885                         rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1886                                    ipv6_spec->hdr.dst_addr, 16);
1887                 }
1888
1889                 /**
1890                  * Check if the next not void item is
1891                  * TCP or UDP or SCTP or END.
1892                  */
1893                 item = next_no_fuzzy_pattern(pattern, item);
1894                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1895                     item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1896                     item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1897                     item->type != RTE_FLOW_ITEM_TYPE_END &&
1898                     item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1899                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900                         rte_flow_error_set(error, EINVAL,
1901                                 RTE_FLOW_ERROR_TYPE_ITEM,
1902                                 item, "Not supported by fdir filter");
1903                         return -rte_errno;
1904                 }
1905         }
1906
1907         /* Get the TCP info. */
1908         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1909                 /**
1910                  * Set the flow type even if there's no content
1911                  * as we must have a flow type.
1912                  */
1913                 rule->ixgbe_fdir.formatted.flow_type |=
1914                         IXGBE_ATR_L4TYPE_TCP;
1915                 /*Not supported last point for range*/
1916                 if (item->last) {
1917                         rte_flow_error_set(error, EINVAL,
1918                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1919                                 item, "Not supported last point for range");
1920                         return -rte_errno;
1921                 }
1922                 /**
1923                  * Only care about src & dst ports,
1924                  * others should be masked.
1925                  */
1926                 if (!item->mask) {
1927                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1928                         rte_flow_error_set(error, EINVAL,
1929                                 RTE_FLOW_ERROR_TYPE_ITEM,
1930                                 item, "Not supported by fdir filter");
1931                         return -rte_errno;
1932                 }
1933                 rule->b_mask = TRUE;
1934                 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1935                 if (tcp_mask->hdr.sent_seq ||
1936                     tcp_mask->hdr.recv_ack ||
1937                     tcp_mask->hdr.data_off ||
1938                     tcp_mask->hdr.tcp_flags ||
1939                     tcp_mask->hdr.rx_win ||
1940                     tcp_mask->hdr.cksum ||
1941                     tcp_mask->hdr.tcp_urp) {
1942                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1943                         rte_flow_error_set(error, EINVAL,
1944                                 RTE_FLOW_ERROR_TYPE_ITEM,
1945                                 item, "Not supported by fdir filter");
1946                         return -rte_errno;
1947                 }
1948                 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1949                 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1950
1951                 if (item->spec) {
1952                         rule->b_spec = TRUE;
1953                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1954                         rule->ixgbe_fdir.formatted.src_port =
1955                                 tcp_spec->hdr.src_port;
1956                         rule->ixgbe_fdir.formatted.dst_port =
1957                                 tcp_spec->hdr.dst_port;
1958                 }
1959
1960                 item = next_no_fuzzy_pattern(pattern, item);
1961                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1962                     item->type != RTE_FLOW_ITEM_TYPE_END) {
1963                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964                         rte_flow_error_set(error, EINVAL,
1965                                 RTE_FLOW_ERROR_TYPE_ITEM,
1966                                 item, "Not supported by fdir filter");
1967                         return -rte_errno;
1968                 }
1969
1970         }
1971
1972         /* Get the UDP info */
1973         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1974                 /**
1975                  * Set the flow type even if there's no content
1976                  * as we must have a flow type.
1977                  */
1978                 rule->ixgbe_fdir.formatted.flow_type |=
1979                         IXGBE_ATR_L4TYPE_UDP;
1980                 /*Not supported last point for range*/
1981                 if (item->last) {
1982                         rte_flow_error_set(error, EINVAL,
1983                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984                                 item, "Not supported last point for range");
1985                         return -rte_errno;
1986                 }
1987                 /**
1988                  * Only care about src & dst ports,
1989                  * others should be masked.
1990                  */
1991                 if (!item->mask) {
1992                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993                         rte_flow_error_set(error, EINVAL,
1994                                 RTE_FLOW_ERROR_TYPE_ITEM,
1995                                 item, "Not supported by fdir filter");
1996                         return -rte_errno;
1997                 }
1998                 rule->b_mask = TRUE;
1999                 udp_mask = (const struct rte_flow_item_udp *)item->mask;
2000                 if (udp_mask->hdr.dgram_len ||
2001                     udp_mask->hdr.dgram_cksum) {
2002                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2003                         rte_flow_error_set(error, EINVAL,
2004                                 RTE_FLOW_ERROR_TYPE_ITEM,
2005                                 item, "Not supported by fdir filter");
2006                         return -rte_errno;
2007                 }
2008                 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2009                 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2010
2011                 if (item->spec) {
2012                         rule->b_spec = TRUE;
2013                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
2014                         rule->ixgbe_fdir.formatted.src_port =
2015                                 udp_spec->hdr.src_port;
2016                         rule->ixgbe_fdir.formatted.dst_port =
2017                                 udp_spec->hdr.dst_port;
2018                 }
2019
2020                 item = next_no_fuzzy_pattern(pattern, item);
2021                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2022                     item->type != RTE_FLOW_ITEM_TYPE_END) {
2023                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2024                         rte_flow_error_set(error, EINVAL,
2025                                 RTE_FLOW_ERROR_TYPE_ITEM,
2026                                 item, "Not supported by fdir filter");
2027                         return -rte_errno;
2028                 }
2029
2030         }
2031
2032         /* Get the SCTP info */
2033         if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2034                 /**
2035                  * Set the flow type even if there's no content
2036                  * as we must have a flow type.
2037                  */
2038                 rule->ixgbe_fdir.formatted.flow_type |=
2039                         IXGBE_ATR_L4TYPE_SCTP;
2040                 /*Not supported last point for range*/
2041                 if (item->last) {
2042                         rte_flow_error_set(error, EINVAL,
2043                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2044                                 item, "Not supported last point for range");
2045                         return -rte_errno;
2046                 }
2047
2048                 /* Only the x550 family supports the SCTP port. */
2049                 if (hw->mac.type == ixgbe_mac_X550 ||
2050                     hw->mac.type == ixgbe_mac_X550EM_x ||
2051                     hw->mac.type == ixgbe_mac_X550EM_a) {
2052                         /**
2053                          * Only care about src & dst ports,
2054                          * others should be masked.
2055                          */
2056                         if (!item->mask) {
2057                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2058                                 rte_flow_error_set(error, EINVAL,
2059                                         RTE_FLOW_ERROR_TYPE_ITEM,
2060                                         item, "Not supported by fdir filter");
2061                                 return -rte_errno;
2062                         }
2063                         rule->b_mask = TRUE;
2064                         sctp_mask =
2065                                 (const struct rte_flow_item_sctp *)item->mask;
2066                         if (sctp_mask->hdr.tag ||
2067                                 sctp_mask->hdr.cksum) {
2068                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2069                                 rte_flow_error_set(error, EINVAL,
2070                                         RTE_FLOW_ERROR_TYPE_ITEM,
2071                                         item, "Not supported by fdir filter");
2072                                 return -rte_errno;
2073                         }
2074                         rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2075                         rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2076
2077                         if (item->spec) {
2078                                 rule->b_spec = TRUE;
2079                                 sctp_spec =
2080                                 (const struct rte_flow_item_sctp *)item->spec;
2081                                 rule->ixgbe_fdir.formatted.src_port =
2082                                         sctp_spec->hdr.src_port;
2083                                 rule->ixgbe_fdir.formatted.dst_port =
2084                                         sctp_spec->hdr.dst_port;
2085                         }
2086                 /* On other NICs, even the SCTP port is not supported. */
2087                 } else {
2088                         sctp_mask =
2089                                 (const struct rte_flow_item_sctp *)item->mask;
2090                         if (sctp_mask &&
2091                                 (sctp_mask->hdr.src_port ||
2092                                  sctp_mask->hdr.dst_port ||
2093                                  sctp_mask->hdr.tag ||
2094                                  sctp_mask->hdr.cksum)) {
2095                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2096                                 rte_flow_error_set(error, EINVAL,
2097                                         RTE_FLOW_ERROR_TYPE_ITEM,
2098                                         item, "Not supported by fdir filter");
2099                                 return -rte_errno;
2100                         }
2101                 }
2102
2103                 item = next_no_fuzzy_pattern(pattern, item);
2104                 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2105                         item->type != RTE_FLOW_ITEM_TYPE_END) {
2106                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107                         rte_flow_error_set(error, EINVAL,
2108                                 RTE_FLOW_ERROR_TYPE_ITEM,
2109                                 item, "Not supported by fdir filter");
2110                         return -rte_errno;
2111                 }
2112         }
2113
2114         /* Get the flex byte info */
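        /*
         * Constraints enforced below: the RAW mask must fully cover every
         * field, while the RAW spec must use relative = 0, search = 0,
         * limit = 0, an even offset of at most IXGBE_MAX_FLX_SOURCE_OFF
         * bytes, and exactly 2 pattern bytes that are not both 0xFF.
         */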
2115         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2116                 /* Not supported last point for range*/
2117                 if (item->last) {
2118                         rte_flow_error_set(error, EINVAL,
2119                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2120                                 item, "Not supported last point for range");
2121                         return -rte_errno;
2122                 }
2123                 /* spec and mask should not be NULL */
2124                 if (!item->mask || !item->spec) {
2125                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2126                         rte_flow_error_set(error, EINVAL,
2127                                 RTE_FLOW_ERROR_TYPE_ITEM,
2128                                 item, "Not supported by fdir filter");
2129                         return -rte_errno;
2130                 }
2131
2132                 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2133
2134                 /* check mask */
2135                 if (raw_mask->relative != 0x1 ||
2136                     raw_mask->search != 0x1 ||
2137                     raw_mask->reserved != 0x0 ||
2138                     (uint32_t)raw_mask->offset != 0xffffffff ||
2139                     raw_mask->limit != 0xffff ||
2140                     raw_mask->length != 0xffff) {
2141                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2142                         rte_flow_error_set(error, EINVAL,
2143                                 RTE_FLOW_ERROR_TYPE_ITEM,
2144                                 item, "Not supported by fdir filter");
2145                         return -rte_errno;
2146                 }
2147
2148                 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2149
2150                 /* check spec */
2151                 if (raw_spec->relative != 0 ||
2152                     raw_spec->search != 0 ||
2153                     raw_spec->reserved != 0 ||
2154                     raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2155                     raw_spec->offset % 2 ||
2156                     raw_spec->limit != 0 ||
2157                     raw_spec->length != 2 ||
2158                     /* pattern can't be 0xffff */
2159                     (raw_spec->pattern[0] == 0xff &&
2160                      raw_spec->pattern[1] == 0xff)) {
2161                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2162                         rte_flow_error_set(error, EINVAL,
2163                                 RTE_FLOW_ERROR_TYPE_ITEM,
2164                                 item, "Not supported by fdir filter");
2165                         return -rte_errno;
2166                 }
2167
2168                 /* check pattern mask */
2169                 if (raw_mask->pattern[0] != 0xff ||
2170                     raw_mask->pattern[1] != 0xff) {
2171                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2172                         rte_flow_error_set(error, EINVAL,
2173                                 RTE_FLOW_ERROR_TYPE_ITEM,
2174                                 item, "Not supported by fdir filter");
2175                         return -rte_errno;
2176                 }
2177
2178                 rule->mask.flex_bytes_mask = 0xffff;
2179                 rule->ixgbe_fdir.formatted.flex_bytes =
2180                         (((uint16_t)raw_spec->pattern[1]) << 8) |
2181                         raw_spec->pattern[0];
2182                 rule->flex_bytes_offset = raw_spec->offset;
2183         }
2184
2185         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2186                 /* check if the next not void item is END */
2187                 item = next_no_fuzzy_pattern(pattern, item);
2188                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2189                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2190                         rte_flow_error_set(error, EINVAL,
2191                                 RTE_FLOW_ERROR_TYPE_ITEM,
2192                                 item, "Not supported by fdir filter");
2193                         return -rte_errno;
2194                 }
2195         }
2196
2197         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2198 }
2199
2200 #define NVGRE_PROTOCOL 0x6558
2201
2202 /**
2203  * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2204  * and get the flow director filter info along the way.
2205  * VxLAN PATTERN:
2206  * The first not void item must be ETH.
2207  * The second not void item must be IPV4/ IPV6.
2208  * The third not void item must be UDP, and the fourth must be VxLAN.
2209  * The next not void item must be END.
2210  * NVGRE PATTERN:
2211  * The first not void item must be ETH.
2212  * The second not void item must be IPV4/ IPV6.
2213  * The third not void item must be NVGRE.
2214  * The next not void item must be END.
2215  * ACTION:
2216  * The first not void action should be QUEUE or DROP.
2217  * The second not void optional action should be MARK,
2218  * mark_id is a uint32_t number.
2219  * The next not void action should be END.
2220  * VxLAN pattern example:
2221  * ITEM         Spec                    Mask
2222  * ETH          NULL                    NULL
2223  * IPV4/IPV6    NULL                    NULL
2224  * UDP          NULL                    NULL
2225  * VxLAN        vni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2226  * MAC VLAN     tci     0x2016          0xEFFF
2227  * END
2228  * NVGRE pattern example:
2229  * ITEM         Spec                    Mask
2230  * ETH          NULL                    NULL
2231  * IPV4/IPV6    NULL                    NULL
2232  * NVGRE        protocol        0x6558  0xFFFF
2233  *              tni{0x00, 0x32, 0x54}   {0xFF, 0xFF, 0xFF}
2234  * MAC VLAN     tci     0x2016          0xEFFF
2235  * END
2236  * Other members in mask and spec should be set to 0x00.
2237  * item->last should be NULL.
2238  */
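/*
 * Illustrative sketch (not part of the driver): the outer part of the VxLAN
 * example above expressed as rte_flow items. Only the VNI carries a
 * spec/mask; the outer ETH/IPv4/UDP items merely describe the protocol
 * stack. The inner MAC VLAN item and the END item from the example above
 * would follow. Variable names are hypothetical.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	const struct rte_flow_item outer[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *	};
 */
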
2239 static int
2240 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2241                                const struct rte_flow_item pattern[],
2242                                const struct rte_flow_action actions[],
2243                                struct ixgbe_fdir_rule *rule,
2244                                struct rte_flow_error *error)
2245 {
2246         const struct rte_flow_item *item;
2247         const struct rte_flow_item_vxlan *vxlan_spec;
2248         const struct rte_flow_item_vxlan *vxlan_mask;
2249         const struct rte_flow_item_nvgre *nvgre_spec;
2250         const struct rte_flow_item_nvgre *nvgre_mask;
2251         const struct rte_flow_item_eth *eth_spec;
2252         const struct rte_flow_item_eth *eth_mask;
2253         const struct rte_flow_item_vlan *vlan_spec;
2254         const struct rte_flow_item_vlan *vlan_mask;
2255         uint32_t j;
2256
2257         if (!pattern) {
2258                 rte_flow_error_set(error, EINVAL,
2259                         RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2260                                    NULL, "NULL pattern.");
2261                 return -rte_errno;
2262         }
2263
2264         if (!actions) {
2265                 rte_flow_error_set(error, EINVAL,
2266                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2267                                    NULL, "NULL action.");
2268                 return -rte_errno;
2269         }
2270
2271         if (!attr) {
2272                 rte_flow_error_set(error, EINVAL,
2273                                    RTE_FLOW_ERROR_TYPE_ATTR,
2274                                    NULL, "NULL attribute.");
2275                 return -rte_errno;
2276         }
2277
2278         /**
2279          * Some fields may not be provided. Set spec to 0 and mask to default
2280          * value, so the fields that are not provided need no handling later.
2281          */
2282         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2283         memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2284         rule->mask.vlan_tci_mask = 0;
2285
2286         /**
2287          * The first not void item should be
2288          * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2289          */
2290         item = next_no_void_pattern(pattern, NULL);
2291         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2292             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2293             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2294             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2295             item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2296             item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2297                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2298                 rte_flow_error_set(error, EINVAL,
2299                         RTE_FLOW_ERROR_TYPE_ITEM,
2300                         item, "Not supported by fdir filter");
2301                 return -rte_errno;
2302         }
2303
2304         rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2305
2306         /* Skip MAC. */
2307         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2308                 /* Only used to describe the protocol stack. */
2309                 if (item->spec || item->mask) {
2310                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2311                         rte_flow_error_set(error, EINVAL,
2312                                 RTE_FLOW_ERROR_TYPE_ITEM,
2313                                 item, "Not supported by fdir filter");
2314                         return -rte_errno;
2315                 }
2316                 /* Not supported last point for range*/
2317                 if (item->last) {
2318                         rte_flow_error_set(error, EINVAL,
2319                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2320                                 item, "Not supported last point for range");
2321                         return -rte_errno;
2322                 }
2323
2324                 /* Check if the next not void item is IPv4 or IPv6. */
2325                 item = next_no_void_pattern(pattern, item);
2326                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2327                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2328                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2329                         rte_flow_error_set(error, EINVAL,
2330                                 RTE_FLOW_ERROR_TYPE_ITEM,
2331                                 item, "Not supported by fdir filter");
2332                         return -rte_errno;
2333                 }
2334         }
2335
2336         /* Skip IP. */
2337         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2338             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2339                 /* Only used to describe the protocol stack. */
2340                 if (item->spec || item->mask) {
2341                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2342                         rte_flow_error_set(error, EINVAL,
2343                                 RTE_FLOW_ERROR_TYPE_ITEM,
2344                                 item, "Not supported by fdir filter");
2345                         return -rte_errno;
2346                 }
2347                 /*Not supported last point for range*/
2348                 if (item->last) {
2349                         rte_flow_error_set(error, EINVAL,
2350                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2351                                 item, "Not supported last point for range");
2352                         return -rte_errno;
2353                 }
2354
2355                 /* Check if the next not void item is UDP or NVGRE. */
2356                 item = next_no_void_pattern(pattern, item);
2357                 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2358                     item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2359                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2360                         rte_flow_error_set(error, EINVAL,
2361                                 RTE_FLOW_ERROR_TYPE_ITEM,
2362                                 item, "Not supported by fdir filter");
2363                         return -rte_errno;
2364                 }
2365         }
2366
2367         /* Skip UDP. */
2368         if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2369                 /* Only used to describe the protocol stack. */
2370                 if (item->spec || item->mask) {
2371                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2372                         rte_flow_error_set(error, EINVAL,
2373                                 RTE_FLOW_ERROR_TYPE_ITEM,
2374                                 item, "Not supported by fdir filter");
2375                         return -rte_errno;
2376                 }
2377                 /*Not supported last point for range*/
2378                 if (item->last) {
2379                         rte_flow_error_set(error, EINVAL,
2380                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2381                                 item, "Not supported last point for range");
2382                         return -rte_errno;
2383                 }
2384
2385                 /* Check if the next not void item is VxLAN. */
2386                 item = next_no_void_pattern(pattern, item);
2387                 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2388                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2389                         rte_flow_error_set(error, EINVAL,
2390                                 RTE_FLOW_ERROR_TYPE_ITEM,
2391                                 item, "Not supported by fdir filter");
2392                         return -rte_errno;
2393                 }
2394         }
2395
2396         /* Get the VxLAN info */
2397         if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2398                 rule->ixgbe_fdir.formatted.tunnel_type =
2399                         RTE_FDIR_TUNNEL_TYPE_VXLAN;
2400
2401                 /* Only care about VNI, others should be masked. */
2402                 if (!item->mask) {
2403                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2404                         rte_flow_error_set(error, EINVAL,
2405                                 RTE_FLOW_ERROR_TYPE_ITEM,
2406                                 item, "Not supported by fdir filter");
2407                         return -rte_errno;
2408                 }
2409                 /*Not supported last point for range*/
2410                 if (item->last) {
2411                         rte_flow_error_set(error, EINVAL,
2412                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2413                                 item, "Not supported last point for range");
2414                         return -rte_errno;
2415                 }
2416                 rule->b_mask = TRUE;
2417
2418                 /* Tunnel type is always meaningful. */
2419                 rule->mask.tunnel_type_mask = 1;
2420
2421                 vxlan_mask =
2422                         (const struct rte_flow_item_vxlan *)item->mask;
2423                 if (vxlan_mask->flags) {
2424                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2425                         rte_flow_error_set(error, EINVAL,
2426                                 RTE_FLOW_ERROR_TYPE_ITEM,
2427                                 item, "Not supported by fdir filter");
2428                         return -rte_errno;
2429                 }
2430                 /* VNI must be totally masked or not masked at all. */
2431                 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2432                         vxlan_mask->vni[2]) &&
2433                         ((vxlan_mask->vni[0] != 0xFF) ||
2434                         (vxlan_mask->vni[1] != 0xFF) ||
2435                                 (vxlan_mask->vni[2] != 0xFF))) {
2436                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2437                         rte_flow_error_set(error, EINVAL,
2438                                 RTE_FLOW_ERROR_TYPE_ITEM,
2439                                 item, "Not supported by fdir filter");
2440                         return -rte_errno;
2441                 }
2442
2443                 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2444                         RTE_DIM(vxlan_mask->vni));
2445
2446                 if (item->spec) {
2447                         rule->b_spec = TRUE;
2448                         vxlan_spec = (const struct rte_flow_item_vxlan *)
2449                                         item->spec;
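                        /*
                         * The 24-bit VNI arrives in network order; it is
                         * copied into the lower three bytes of the
                         * big-endian tni_vni and then converted, so that
                         * tni_vni ends up holding the VNI in CPU order.
                         */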
2450                         rte_memcpy(((uint8_t *)
2451                                 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2452                                 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2453                         rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2454                                 rule->ixgbe_fdir.formatted.tni_vni);
2455                 }
2456         }
2457
2458         /* Get the NVGRE info */
2459         if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2460                 rule->ixgbe_fdir.formatted.tunnel_type =
2461                         RTE_FDIR_TUNNEL_TYPE_NVGRE;
2462
2463                 /**
2464                  * Only care about c_k_s_rsvd0_ver, protocol and TNI,
2465                  * others should be masked.
2466                  */
2467                 if (!item->mask) {
2468                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2469                         rte_flow_error_set(error, EINVAL,
2470                                 RTE_FLOW_ERROR_TYPE_ITEM,
2471                                 item, "Not supported by fdir filter");
2472                         return -rte_errno;
2473                 }
2474                 /* Ranges ("last") are not supported. */
2475                 if (item->last) {
2476                         rte_flow_error_set(error, EINVAL,
2477                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2478                                 item, "Not supported last point for range");
2479                         return -rte_errno;
2480                 }
2481                 rule->b_mask = TRUE;
2482
2483                 /* Tunnel type is always meaningful. */
2484                 rule->mask.tunnel_type_mask = 1;
2485
2486                 nvgre_mask =
2487                         (const struct rte_flow_item_nvgre *)item->mask;
2488                 if (nvgre_mask->flow_id) {
2489                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2490                         rte_flow_error_set(error, EINVAL,
2491                                 RTE_FLOW_ERROR_TYPE_ITEM,
2492                                 item, "Not supported by fdir filter");
2493                         return -rte_errno;
2494                 }
2495                 if (nvgre_mask->protocol &&
2496                     nvgre_mask->protocol != 0xFFFF) {
2497                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2498                         rte_flow_error_set(error, EINVAL,
2499                                 RTE_FLOW_ERROR_TYPE_ITEM,
2500                                 item, "Not supported by fdir filter");
2501                         return -rte_errno;
2502                 }
2503                 if (nvgre_mask->c_k_s_rsvd0_ver &&
2504                     nvgre_mask->c_k_s_rsvd0_ver !=
2505                         rte_cpu_to_be_16(0xFFFF)) {
2506                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2507                         rte_flow_error_set(error, EINVAL,
2508                                 RTE_FLOW_ERROR_TYPE_ITEM,
2509                                 item, "Not supported by fdir filter");
2510                         return -rte_errno;
2511                 }
2512                 /* The TNI must be either fully masked or left unmasked. */
2513                 if (nvgre_mask->tni[0] &&
2514                     ((nvgre_mask->tni[0] != 0xFF) ||
2515                     (nvgre_mask->tni[1] != 0xFF) ||
2516                     (nvgre_mask->tni[2] != 0xFF))) {
2517                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2518                         rte_flow_error_set(error, EINVAL,
2519                                 RTE_FLOW_ERROR_TYPE_ITEM,
2520                                 item, "Not supported by fdir filter");
2521                         return -rte_errno;
2522                 }
2523                 /* The TNI is a 24-bit field. */
2524                 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2525                         RTE_DIM(nvgre_mask->tni));
2526                 rule->mask.tunnel_id_mask <<= 8;
2527
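                     /* If a spec is present, validate the header flags and protocol, then extract the TNI. */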
2528                 if (item->spec) {
2529                         rule->b_spec = TRUE;
2530                         nvgre_spec =
2531                                 (const struct rte_flow_item_nvgre *)item->spec;
2532                         if (nvgre_spec->c_k_s_rsvd0_ver !=
2533                             rte_cpu_to_be_16(0x2000) &&
2534                                 nvgre_mask->c_k_s_rsvd0_ver) {
2535                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2536                                 rte_flow_error_set(error, EINVAL,
2537                                         RTE_FLOW_ERROR_TYPE_ITEM,
2538                                         item, "Not supported by fdir filter");
2539                                 return -rte_errno;
2540                         }
2541                         if (nvgre_mask->protocol &&
2542                             nvgre_spec->protocol !=
2543                             rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2544                                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2545                                 rte_flow_error_set(error, EINVAL,
2546                                         RTE_FLOW_ERROR_TYPE_ITEM,
2547                                         item, "Not supported by fdir filter");
2548                                 return -rte_errno;
2549                         }
2550                         /* The TNI is a 24-bit field. */
2551                         rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2552                         nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2553                         rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2554                 }
2555         }
2556
2557         /* Check if the next not-void item is ETH. */
2558         item = next_no_void_pattern(pattern, item);
2559         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2560                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2561                 rte_flow_error_set(error, EINVAL,
2562                         RTE_FLOW_ERROR_TYPE_ITEM,
2563                         item, "Not supported by fdir filter");
2564                 return -rte_errno;
2565         }
2566
2567         /**
2568          * Only VLAN and the destination MAC address are supported;
2569          * other fields must be masked out.
2570          */
2571
2572         if (!item->mask) {
2573                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2574                 rte_flow_error_set(error, EINVAL,
2575                         RTE_FLOW_ERROR_TYPE_ITEM,
2576                         item, "Not supported by fdir filter");
2577                 return -rte_errno;
2578         }
2579         /* Ranges ("last") are not supported. */
2580         if (item->last) {
2581                 rte_flow_error_set(error, EINVAL,
2582                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2583                         item, "Not supported last point for range");
2584                 return -rte_errno;
2585         }
2586         rule->b_mask = TRUE;
2587         eth_mask = (const struct rte_flow_item_eth *)item->mask;
2588
2589         /* The Ether type must be masked out (not matched). */
2590         if (eth_mask->type) {
2591                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2592                 rte_flow_error_set(error, EINVAL,
2593                         RTE_FLOW_ERROR_TYPE_ITEM,
2594                         item, "Not supported by fdir filter");
2595                 return -rte_errno;
2596         }
2597
2598         /* The source MAC address must be masked out (not matched). */
2599         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2600                 if (eth_mask->src.addr_bytes[j]) {
2601                         memset(rule, 0,
2602                                sizeof(struct ixgbe_fdir_rule));
2603                         rte_flow_error_set(error, EINVAL,
2604                                 RTE_FLOW_ERROR_TYPE_ITEM,
2605                                 item, "Not supported by fdir filter");
2606                         return -rte_errno;
2607                 }
2608         }
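             /* Build a per-byte mask for the destination MAC: each 0xFF byte sets one mask bit. */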
2609         rule->mask.mac_addr_byte_mask = 0;
2610         for (j = 0; j < ETHER_ADDR_LEN; j++) {
2611                 /* It is a per-byte mask. */
2612                 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2613                         rule->mask.mac_addr_byte_mask |= 0x1 << j;
2614                 } else if (eth_mask->dst.addr_bytes[j]) {
2615                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2616                         rte_flow_error_set(error, EINVAL,
2617                                 RTE_FLOW_ERROR_TYPE_ITEM,
2618                                 item, "Not supported by fdir filter");
2619                         return -rte_errno;
2620                 }
2621         }
2622
2623         /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2624         rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2625
2626         if (item->spec) {
2627                 rule->b_spec = TRUE;
2628                 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2629
2630                 /* Get the dst MAC. */
2631                 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2632                         rule->ixgbe_fdir.formatted.inner_mac[j] =
2633                                 eth_spec->dst.addr_bytes[j];
2634                 }
2635         }
2636
2637         /**
2638          * Check if the next not void item is vlan or ipv4.
2639          * IPv6 is not supported.
2640          */
2641         item = next_no_void_pattern(pattern, item);
2642         if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2643                 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2644                 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2645                 rte_flow_error_set(error, EINVAL,
2646                         RTE_FLOW_ERROR_TYPE_ITEM,
2647                         item, "Not supported by fdir filter");
2648                 return -rte_errno;
2649         }
2650         /* Ranges ("last") are not supported. */
2651         if (item->last) {
2652                 rte_flow_error_set(error, EINVAL,
2653                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2654                         item, "Not supported last point for range");
2655                 return -rte_errno;
2656         }
2657
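             /* For a VLAN item, both spec and mask are required and only the TCI is used. */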
2658         if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2659                 if (!(item->spec && item->mask)) {
2660                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2661                         rte_flow_error_set(error, EINVAL,
2662                                 RTE_FLOW_ERROR_TYPE_ITEM,
2663                                 item, "Not supported by fdir filter");
2664                         return -rte_errno;
2665                 }
2666
2667                 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2668                 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2669
2670                 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2671
2672                 rule->mask.vlan_tci_mask = vlan_mask->tci;
2673                 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2674                 /* More than one VLAN tag is not supported. */
2675
2676                 /* check if the next not void item is END */
2677                 item = next_no_void_pattern(pattern, item);
2678
2679                 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2680                         memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2681                         rte_flow_error_set(error, EINVAL,
2682                                 RTE_FLOW_ERROR_TYPE_ITEM,
2683                                 item, "Not supported by fdir filter");
2684                         return -rte_errno;
2685                 }
2686         }
2687
2688         /**
2689          * If no VLAN tag is given, the VLAN is treated as don't-care.
2690          * Nothing to do.
2691          */
2692
2693         return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2694 }
2695
2696 static int
2697 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2698                         const struct rte_flow_attr *attr,
2699                         const struct rte_flow_item pattern[],
2700                         const struct rte_flow_action actions[],
2701                         struct ixgbe_fdir_rule *rule,
2702                         struct rte_flow_error *error)
2703 {
2704         int ret;
2705         struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2706         enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2707
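             /* Flow director filters are only supported on 82599, X540 and the X550 family. */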
2708         if (hw->mac.type != ixgbe_mac_82599EB &&
2709                 hw->mac.type != ixgbe_mac_X540 &&
2710                 hw->mac.type != ixgbe_mac_X550 &&
2711                 hw->mac.type != ixgbe_mac_X550EM_x &&
2712                 hw->mac.type != ixgbe_mac_X550EM_a)
2713                 return -ENOTSUP;
2714
2715         ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2716                                         actions, rule, error);
2717
2718         if (!ret)
2719                 goto step_next;
2720
2721         ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2722                                         actions, rule, error);
2723
2724         if (ret)
2725                 return ret;
2726
2727 step_next:
2728
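             /* On 82599, reject drop rules that also match L4 source/destination ports. */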
2729         if (hw->mac.type == ixgbe_mac_82599EB &&
2730                 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2731                 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2732                 rule->ixgbe_fdir.formatted.dst_port != 0))
2733                 return -ENOTSUP;
2734
2735         if (fdir_mode == RTE_FDIR_MODE_NONE ||
2736             fdir_mode != rule->mode)
2737                 return -ENOTSUP;
2738
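             /* The target Rx queue must exist on this port. */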
2739         if (rule->queue >= dev->data->nb_rx_queues)
2740                 return -ENOTSUP;
2741
2742         return ret;
2743 }
2744
2745 void
2746 ixgbe_filterlist_init(void)
2747 {
2748         TAILQ_INIT(&filter_ntuple_list);
2749         TAILQ_INIT(&filter_ethertype_list);
2750         TAILQ_INIT(&filter_syn_list);
2751         TAILQ_INIT(&filter_fdir_list);
2752         TAILQ_INIT(&filter_l2_tunnel_list);
2753         TAILQ_INIT(&ixgbe_flow_list);
2754 }
2755
2756 void
2757 ixgbe_filterlist_flush(void)
2758 {
2759         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2760         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2761         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2762         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2763         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2764         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2765
2766         while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2767                 TAILQ_REMOVE(&filter_ntuple_list,
2768                                  ntuple_filter_ptr,
2769                                  entries);
2770                 rte_free(ntuple_filter_ptr);
2771         }
2772
2773         while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2774                 TAILQ_REMOVE(&filter_ethertype_list,
2775                                  ethertype_filter_ptr,
2776                                  entries);
2777                 rte_free(ethertype_filter_ptr);
2778         }
2779
2780         while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2781                 TAILQ_REMOVE(&filter_syn_list,
2782                                  syn_filter_ptr,
2783                                  entries);
2784                 rte_free(syn_filter_ptr);
2785         }
2786
2787         while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2788                 TAILQ_REMOVE(&filter_l2_tunnel_list,
2789                                  l2_tn_filter_ptr,
2790                                  entries);
2791                 rte_free(l2_tn_filter_ptr);
2792         }
2793
2794         while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2795                 TAILQ_REMOVE(&filter_fdir_list,
2796                                  fdir_rule_ptr,
2797                                  entries);
2798                 rte_free(fdir_rule_ptr);
2799         }
2800
2801         while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2802                 TAILQ_REMOVE(&ixgbe_flow_list,
2803                                  ixgbe_flow_mem_ptr,
2804                                  entries);
2805                 rte_free(ixgbe_flow_mem_ptr->flow);
2806                 rte_free(ixgbe_flow_mem_ptr);
2807         }
2808 }
2809
2810 /**
2811  * Create or destroy a flow rule.
2812  * Theoretically one rule can match more than one kind of filter.
2813  * It will use the first filter type it matches,
2814  * so the order of the parsers matters.
2815  */
2816 static struct rte_flow *
2817 ixgbe_flow_create(struct rte_eth_dev *dev,
2818                   const struct rte_flow_attr *attr,
2819                   const struct rte_flow_item pattern[],
2820                   const struct rte_flow_action actions[],
2821                   struct rte_flow_error *error)
2822 {
2823         int ret;
2824         struct rte_eth_ntuple_filter ntuple_filter;
2825         struct rte_eth_ethertype_filter ethertype_filter;
2826         struct rte_eth_syn_filter syn_filter;
2827         struct ixgbe_fdir_rule fdir_rule;
2828         struct rte_eth_l2_tunnel_conf l2_tn_filter;
2829         struct ixgbe_hw_fdir_info *fdir_info =
2830                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2831         struct rte_flow *flow = NULL;
2832         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2833         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2834         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2835         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2836         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2837         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2838         uint8_t first_mask = FALSE;
2839
2840         flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2841         if (!flow) {
2842                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2843                 return NULL;
2844         }
2845         ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2846                         sizeof(struct ixgbe_flow_mem), 0);
2847         if (!ixgbe_flow_mem_ptr) {
2848                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2849                 rte_free(flow);
2850                 return NULL;
2851         }
2852         ixgbe_flow_mem_ptr->flow = flow;
2853         TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2854                                 ixgbe_flow_mem_ptr, entries);
2855
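             /* Try each filter parser in order; the first one that accepts the pattern is used. */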
2856         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2857         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2858                         actions, &ntuple_filter, error);
2859
2860 #ifdef RTE_LIBRTE_SECURITY
2861         /* An ESP flow is not really a flow; return it as-is. */
2862         if (ntuple_filter.proto == IPPROTO_ESP)
2863                 return flow;
2864 #endif
2865
2866         if (!ret) {
2867                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2868                 if (!ret) {
2869                         ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2870                                 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2871                         if (!ntuple_filter_ptr) {
2872                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2873                                 goto out;
2874                         }
2875                         rte_memcpy(&ntuple_filter_ptr->filter_info,
2876                                 &ntuple_filter,
2877                                 sizeof(struct rte_eth_ntuple_filter));
2878                         TAILQ_INSERT_TAIL(&filter_ntuple_list,
2879                                 ntuple_filter_ptr, entries);
2880                         flow->rule = ntuple_filter_ptr;
2881                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2882                         return flow;
2883                 }
2884                 goto out;
2885         }
2886
2887         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2888         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2889                                 actions, &ethertype_filter, error);
2890         if (!ret) {
2891                 ret = ixgbe_add_del_ethertype_filter(dev,
2892                                 &ethertype_filter, TRUE);
2893                 if (!ret) {
2894                         ethertype_filter_ptr = rte_zmalloc(
2895                                 "ixgbe_ethertype_filter",
2896                                 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2897                         if (!ethertype_filter_ptr) {
2898                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2899                                 goto out;
2900                         }
2901                         rte_memcpy(&ethertype_filter_ptr->filter_info,
2902                                 &ethertype_filter,
2903                                 sizeof(struct rte_eth_ethertype_filter));
2904                         TAILQ_INSERT_TAIL(&filter_ethertype_list,
2905                                 ethertype_filter_ptr, entries);
2906                         flow->rule = ethertype_filter_ptr;
2907                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2908                         return flow;
2909                 }
2910                 goto out;
2911         }
2912
2913         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2914         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2915                                 actions, &syn_filter, error);
2916         if (!ret) {
2917                 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2918                 if (!ret) {
2919                         syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2920                                 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2921                         if (!syn_filter_ptr) {
2922                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
2923                                 goto out;
2924                         }
2925                         rte_memcpy(&syn_filter_ptr->filter_info,
2926                                 &syn_filter,
2927                                 sizeof(struct rte_eth_syn_filter));
2928                         TAILQ_INSERT_TAIL(&filter_syn_list,
2929                                 syn_filter_ptr,
2930                                 entries);
2931                         flow->rule = syn_filter_ptr;
2932                         flow->filter_type = RTE_ETH_FILTER_SYN;
2933                         return flow;
2934                 }
2935                 goto out;
2936         }
2937
2938         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2939         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2940                                 actions, &fdir_rule, error);
2941         if (!ret) {
2942                 /* A mask cannot be deleted. */
2943                 if (fdir_rule.b_mask) {
2944                         if (!fdir_info->mask_added) {
2945                                 /* It's the first time the mask is set. */
2946                                 rte_memcpy(&fdir_info->mask,
2947                                         &fdir_rule.mask,
2948                                         sizeof(struct ixgbe_hw_fdir_mask));
2949                                 fdir_info->flex_bytes_offset =
2950                                         fdir_rule.flex_bytes_offset;
2951
2952                                 if (fdir_rule.mask.flex_bytes_mask)
2953                                         ixgbe_fdir_set_flexbytes_offset(dev,
2954                                                 fdir_rule.flex_bytes_offset);
2955
2956                                 ret = ixgbe_fdir_set_input_mask(dev);
2957                                 if (ret)
2958                                         goto out;
2959
2960                                 fdir_info->mask_added = TRUE;
2961                                 first_mask = TRUE;
2962                         } else {
2963                                 /**
2964                                  * Only one global mask is supported;
2965                                  * all rules must use the same mask.
2966                                  */
2967                                 ret = memcmp(&fdir_info->mask,
2968                                         &fdir_rule.mask,
2969                                         sizeof(struct ixgbe_hw_fdir_mask));
2970                                 if (ret)
2971                                         goto out;
2972
2973                                 if (fdir_info->flex_bytes_offset !=
2974                                                 fdir_rule.flex_bytes_offset)
2975                                         goto out;
2976                         }
2977                 }
2978
2979                 if (fdir_rule.b_spec) {
2980                         ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2981                                         FALSE, FALSE);
2982                         if (!ret) {
2983                                 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2984                                         sizeof(struct ixgbe_fdir_rule_ele), 0);
2985                                 if (!fdir_rule_ptr) {
2986                                         PMD_DRV_LOG(ERR, "failed to allocate memory");
2987                                         goto out;
2988                                 }
2989                                 rte_memcpy(&fdir_rule_ptr->filter_info,
2990                                         &fdir_rule,
2991                                         sizeof(struct ixgbe_fdir_rule));
2992                                 TAILQ_INSERT_TAIL(&filter_fdir_list,
2993                                         fdir_rule_ptr, entries);
2994                                 flow->rule = fdir_rule_ptr;
2995                                 flow->filter_type = RTE_ETH_FILTER_FDIR;
2996
2997                                 return flow;
2998                         }
2999
3000                         if (ret) {
3001                                 /**
3002                                  * Clear the mask_added flag if programming
3003                                  * the filter fails.
3004                                  */
3005                                 if (first_mask)
3006                                         fdir_info->mask_added = FALSE;
3007                                 goto out;
3008                         }
3009                 }
3010
3011                 goto out;
3012         }
3013
3014         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3015         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3016                                         actions, &l2_tn_filter, error);
3017         if (!ret) {
3018                 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3019                 if (!ret) {
3020                         l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3021                                 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3022                         if (!l2_tn_filter_ptr) {
3023                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
3024                                 goto out;
3025                         }
3026                         rte_memcpy(&l2_tn_filter_ptr->filter_info,
3027                                 &l2_tn_filter,
3028                                 sizeof(struct rte_eth_l2_tunnel_conf));
3029                         TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3030                                 l2_tn_filter_ptr, entries);
3031                         flow->rule = l2_tn_filter_ptr;
3032                         flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3033                         return flow;
3034                 }
3035         }
3036
3037 out:
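             /* Error path: undo the list insertion and free the allocated memory. */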
3038         TAILQ_REMOVE(&ixgbe_flow_list,
3039                 ixgbe_flow_mem_ptr, entries);
3040         rte_flow_error_set(error, -ret,
3041                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3042                            "Failed to create flow.");
3043         rte_free(ixgbe_flow_mem_ptr);
3044         rte_free(flow);
3045         return NULL;
3046 }
3047
3048 /**
3049  * Check if the flow rule is supported by ixgbe.
3050  * It only checks the format. It does not guarantee that the rule can be
3051  * programmed into the HW, because there may not be enough room for it.
3052  */
3053 static int
3054 ixgbe_flow_validate(struct rte_eth_dev *dev,
3055                 const struct rte_flow_attr *attr,
3056                 const struct rte_flow_item pattern[],
3057                 const struct rte_flow_action actions[],
3058                 struct rte_flow_error *error)
3059 {
3060         struct rte_eth_ntuple_filter ntuple_filter;
3061         struct rte_eth_ethertype_filter ethertype_filter;
3062         struct rte_eth_syn_filter syn_filter;
3063         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3064         struct ixgbe_fdir_rule fdir_rule;
3065         int ret;
3066
3067         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3068         ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3069                                 actions, &ntuple_filter, error);
3070         if (!ret)
3071                 return 0;
3072
3073         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3074         ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3075                                 actions, &ethertype_filter, error);
3076         if (!ret)
3077                 return 0;
3078
3079         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3080         ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3081                                 actions, &syn_filter, error);
3082         if (!ret)
3083                 return 0;
3084
3085         memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3086         ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3087                                 actions, &fdir_rule, error);
3088         if (!ret)
3089                 return 0;
3090
3091         memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3092         ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3093                                 actions, &l2_tn_filter, error);
3094
3095         return ret;
3096 }
3097
3098 /* Destroy a flow rule on ixgbe. */
3099 static int
3100 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3101                 struct rte_flow *flow,
3102                 struct rte_flow_error *error)
3103 {
3104         int ret;
3105         struct rte_flow *pmd_flow = flow;
3106         enum rte_filter_type filter_type = pmd_flow->filter_type;
3107         struct rte_eth_ntuple_filter ntuple_filter;
3108         struct rte_eth_ethertype_filter ethertype_filter;
3109         struct rte_eth_syn_filter syn_filter;
3110         struct ixgbe_fdir_rule fdir_rule;
3111         struct rte_eth_l2_tunnel_conf l2_tn_filter;
3112         struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3113         struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3114         struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3115         struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3116         struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3117         struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3118         struct ixgbe_hw_fdir_info *fdir_info =
3119                 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3120
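             /* Remove the filter from the hardware first, then drop it from the matching software list. */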
3121         switch (filter_type) {
3122         case RTE_ETH_FILTER_NTUPLE:
3123                 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3124                                         pmd_flow->rule;
3125                 rte_memcpy(&ntuple_filter,
3126                         &ntuple_filter_ptr->filter_info,
3127                         sizeof(struct rte_eth_ntuple_filter));
3128                 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3129                 if (!ret) {
3130                         TAILQ_REMOVE(&filter_ntuple_list,
3131                         ntuple_filter_ptr, entries);
3132                         rte_free(ntuple_filter_ptr);
3133                 }
3134                 break;
3135         case RTE_ETH_FILTER_ETHERTYPE:
3136                 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3137                                         pmd_flow->rule;
3138                 rte_memcpy(&ethertype_filter,
3139                         &ethertype_filter_ptr->filter_info,
3140                         sizeof(struct rte_eth_ethertype_filter));
3141                 ret = ixgbe_add_del_ethertype_filter(dev,
3142                                 &ethertype_filter, FALSE);
3143                 if (!ret) {
3144                         TAILQ_REMOVE(&filter_ethertype_list,
3145                                 ethertype_filter_ptr, entries);
3146                         rte_free(ethertype_filter_ptr);
3147                 }
3148                 break;
3149         case RTE_ETH_FILTER_SYN:
3150                 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3151                                 pmd_flow->rule;
3152                 rte_memcpy(&syn_filter,
3153                         &syn_filter_ptr->filter_info,
3154                         sizeof(struct rte_eth_syn_filter));
3155                 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3156                 if (!ret) {
3157                         TAILQ_REMOVE(&filter_syn_list,
3158                                 syn_filter_ptr, entries);
3159                         rte_free(syn_filter_ptr);
3160                 }
3161                 break;
3162         case RTE_ETH_FILTER_FDIR:
3163                 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3164                 rte_memcpy(&fdir_rule,
3165                         &fdir_rule_ptr->filter_info,
3166                         sizeof(struct ixgbe_fdir_rule));
3167                 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3168                 if (!ret) {
3169                         TAILQ_REMOVE(&filter_fdir_list,
3170                                 fdir_rule_ptr, entries);
3171                         rte_free(fdir_rule_ptr);
3172                         if (TAILQ_EMPTY(&filter_fdir_list))
3173                                 fdir_info->mask_added = false;
3174                 }
3175                 break;
3176         case RTE_ETH_FILTER_L2_TUNNEL:
3177                 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3178                                 pmd_flow->rule;
3179                 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3180                         sizeof(struct rte_eth_l2_tunnel_conf));
3181                 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3182                 if (!ret) {
3183                         TAILQ_REMOVE(&filter_l2_tunnel_list,
3184                                 l2_tn_filter_ptr, entries);
3185                         rte_free(l2_tn_filter_ptr);
3186                 }
3187                 break;
3188         default:
3189                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3190                             filter_type);
3191                 ret = -EINVAL;
3192                 break;
3193         }
3194
3195         if (ret) {
3196                 rte_flow_error_set(error, EINVAL,
3197                                 RTE_FLOW_ERROR_TYPE_HANDLE,
3198                                 NULL, "Failed to destroy flow");
3199                 return ret;
3200         }
3201
3202         TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3203                 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3204                         TAILQ_REMOVE(&ixgbe_flow_list,
3205                                 ixgbe_flow_mem_ptr, entries);
3206                         rte_free(ixgbe_flow_mem_ptr);
3207                 }
3208         }
3209         rte_free(flow);
3210
3211         return ret;
3212 }
3213
3214 /* Destroy all flow rules associated with a port on ixgbe. */
3215 static int
3216 ixgbe_flow_flush(struct rte_eth_dev *dev,
3217                 struct rte_flow_error *error)
3218 {
3219         int ret = 0;
3220
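             /* Clear every filter type from the hardware, then flush the software lists. */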
3221         ixgbe_clear_all_ntuple_filter(dev);
3222         ixgbe_clear_all_ethertype_filter(dev);
3223         ixgbe_clear_syn_filter(dev);
3224
3225         ret = ixgbe_clear_all_fdir_filter(dev);
3226         if (ret < 0) {
3227                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3228                                         NULL, "Failed to flush rule");
3229                 return ret;
3230         }
3231
3232         ret = ixgbe_clear_all_l2_tn_filter(dev);
3233         if (ret < 0) {
3234                 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3235                                         NULL, "Failed to flush rule");
3236                 return ret;
3237         }
3238
3239         ixgbe_filterlist_flush();
3240
3241         return 0;
3242 }
3243
3244 const struct rte_flow_ops ixgbe_flow_ops = {
3245         .validate = ixgbe_flow_validate,
3246         .create = ixgbe_flow_create,
3247         .destroy = ixgbe_flow_destroy,
3248         .flush = ixgbe_flow_flush,
3249 };