1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <stdarg.h>
10
11 #include <rte_common.h>
12 #include <rte_interrupts.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_pci.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev_driver.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_memory.h>
21 #include <rte_eal.h>
22 #include <rte_atomic.h>
23 #include <rte_malloc.h>
24 #include <rte_dev.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27
28 #include "e1000_logs.h"
29 #include "base/e1000_api.h"
30 #include "e1000_ethdev.h"
31
32 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
33         do {                                                    \
34                 item = (pattern) + (index);                     \
35                 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
36                 (index)++;                                      \
37                 item = (pattern) + (index);                     \
38                 }                                               \
39         } while (0)
40
41 #define NEXT_ITEM_OF_ACTION(act, actions, index)                \
42         do {                                                    \
43                 act = (actions) + (index);                      \
44                 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
45                 (index)++;                                      \
46                 act = (actions) + (index);                      \
47                 }                                               \
48         } while (0)
49
50 #define IGB_FLEX_RAW_NUM        12
51
52 /**
53  * Please be aware of an assumption shared by all the parsers:
54  * rte_flow_item uses big endian, while rte_flow_attr and
55  * rte_flow_action use CPU (host) byte order.
56  * Because the pattern is used to describe packets,
57  * the packets normally use network byte order.
58  */
59
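/*
 * Illustrative sketch only (not part of the driver): item spec/mask
 * fields are expected in network byte order, while attributes and
 * action parameters such as the queue index stay in host order.
 *
 *        struct rte_flow_item_udp udp_spec = {
 *                .hdr.dst_port = rte_cpu_to_be_16(80),
 *        };
 *        struct rte_flow_action_queue queue_act = {
 *                .index = 1,
 *        };
 */
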
60 /**
61  * Parse the rule to see if it is an n-tuple rule,
62  * and extract the n-tuple filter info along the way.
63  * pattern:
64  * The first not void item can be ETH or IPV4.
65  * The second not void item must be IPV4 if the first one is ETH.
66  * The third not void item must be UDP, TCP or SCTP.
67  * The next not void item must be END.
68  * action:
69  * The first not void action should be QUEUE.
70  * The next not void action should be END.
71  * pattern example:
72  * ITEM         Spec                    Mask
73  * ETH          NULL                    NULL
74  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
75  *              dst_addr 192.167.3.50   0xFFFFFFFF
76  *              next_proto_id   17      0xFF
77  * UDP/TCP/     src_port        80      0xFFFF
78  * SCTP         dst_port        80      0xFFFF
79  * END
80  * other members in mask and spec should be set to 0x00.
81  * item->last should be NULL.
82  */
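/*
 * Illustrative sketch only (not part of the driver): an application-side
 * pattern/actions layout matching the example above, with made-up values
 * (192.168.1.20 -> 192.167.3.50, UDP ports 80, queue 1).
 *
 *        struct rte_flow_item_ipv4 ip_spec = {
 *                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114),
 *                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332),
 *                .hdr.next_proto_id = 17,
 *        };
 *        struct rte_flow_item_ipv4 ip_mask = {
 *                .hdr.src_addr = UINT32_MAX,
 *                .hdr.dst_addr = UINT32_MAX,
 *                .hdr.next_proto_id = UINT8_MAX,
 *        };
 *        struct rte_flow_item_udp udp_spec = {
 *                .hdr.src_port = rte_cpu_to_be_16(80),
 *                .hdr.dst_port = rte_cpu_to_be_16(80),
 *        };
 *        struct rte_flow_item_udp udp_mask = {
 *                .hdr.src_port = UINT16_MAX,
 *                .hdr.dst_port = UINT16_MAX,
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *                  .spec = &ip_spec, .mask = &ip_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *                  .spec = &udp_spec, .mask = &udp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue_act = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */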
83 static int
84 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
85                          const struct rte_flow_item pattern[],
86                          const struct rte_flow_action actions[],
87                          struct rte_eth_ntuple_filter *filter,
88                          struct rte_flow_error *error)
89 {
90         const struct rte_flow_item *item;
91         const struct rte_flow_action *act;
92         const struct rte_flow_item_ipv4 *ipv4_spec;
93         const struct rte_flow_item_ipv4 *ipv4_mask;
94         const struct rte_flow_item_tcp *tcp_spec;
95         const struct rte_flow_item_tcp *tcp_mask;
96         const struct rte_flow_item_udp *udp_spec;
97         const struct rte_flow_item_udp *udp_mask;
98         const struct rte_flow_item_sctp *sctp_spec;
99         const struct rte_flow_item_sctp *sctp_mask;
100         uint32_t index;
101
102         if (!pattern) {
103                 rte_flow_error_set(error,
104                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
105                         NULL, "NULL pattern.");
106                 return -rte_errno;
107         }
108
109         if (!actions) {
110                 rte_flow_error_set(error, EINVAL,
111                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
112                                    NULL, "NULL action.");
113                 return -rte_errno;
114         }
115         if (!attr) {
116                 rte_flow_error_set(error, EINVAL,
117                                    RTE_FLOW_ERROR_TYPE_ATTR,
118                                    NULL, "NULL attribute.");
119                 return -rte_errno;
120         }
121
122         /* parse pattern */
123         index = 0;
124
125         /* the first not void item can be MAC or IPv4 */
126         NEXT_ITEM_OF_PATTERN(item, pattern, index);
127
128         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
129             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
130                 rte_flow_error_set(error, EINVAL,
131                         RTE_FLOW_ERROR_TYPE_ITEM,
132                         item, "Not supported by ntuple filter");
133                 return -rte_errno;
134         }
135         /* Skip Ethernet */
136         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
137                 /*Not supported last point for range*/
138                 if (item->last) {
139                         rte_flow_error_set(error,
140                           EINVAL,
141                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
142                           item, "Not supported last point for range");
143                         return -rte_errno;
144                 }
145                 /* if the first item is MAC, the content should be NULL */
146                 if (item->spec || item->mask) {
147                         rte_flow_error_set(error, EINVAL,
148                                 RTE_FLOW_ERROR_TYPE_ITEM,
149                                 item, "Not supported by ntuple filter");
150                         return -rte_errno;
151                 }
152                 /* check if the next not void item is IPv4 */
153                 index++;
154                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
155                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
156                         rte_flow_error_set(error,
157                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
158                           item, "Not supported by ntuple filter");
159                         return -rte_errno;
160                 }
161         }
162
163         /* get the IPv4 info */
164         if (!item->spec || !item->mask) {
165                 rte_flow_error_set(error, EINVAL,
166                         RTE_FLOW_ERROR_TYPE_ITEM,
167                         item, "Invalid ntuple mask");
168                 return -rte_errno;
169         }
170         /* Not supported last point for range */
171         if (item->last) {
172                 rte_flow_error_set(error, EINVAL,
173                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
174                         item, "Not supported last point for range");
175                 return -rte_errno;
176         }
177
178         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
179         /**
180          * Only support src & dst addresses, protocol,
181          * others should be masked.
182          */
183
184         if (ipv4_mask->hdr.version_ihl ||
185                 ipv4_mask->hdr.type_of_service ||
186                 ipv4_mask->hdr.total_length ||
187                 ipv4_mask->hdr.packet_id ||
188                 ipv4_mask->hdr.fragment_offset ||
189                 ipv4_mask->hdr.time_to_live ||
190                 ipv4_mask->hdr.hdr_checksum) {
191                 rte_flow_error_set(error,
192                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
193                         item, "Not supported by ntuple filter");
194                 return -rte_errno;
195         }
196
197         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
198         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
199         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
200
201         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
202         filter->dst_ip = ipv4_spec->hdr.dst_addr;
203         filter->src_ip = ipv4_spec->hdr.src_addr;
204         filter->proto  = ipv4_spec->hdr.next_proto_id;
205
206         /* check if the next not void item is TCP or UDP or SCTP */
207         index++;
208         NEXT_ITEM_OF_PATTERN(item, pattern, index);
209         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
210             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
211             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
212                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
213                 rte_flow_error_set(error, EINVAL,
214                         RTE_FLOW_ERROR_TYPE_ITEM,
215                         item, "Not supported by ntuple filter");
216                 return -rte_errno;
217         }
218
219         /* Not supported last point for range */
220         if (item->last) {
221                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
222                 rte_flow_error_set(error, EINVAL,
223                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
224                         item, "Not supported last point for range");
225                 return -rte_errno;
226         }
227
228         /* get the TCP/UDP/SCTP info */
229         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
230                 if (item->spec && item->mask) {
231                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
232
233                         /**
234                          * Only support src & dst ports, tcp flags,
235                          * others should be masked.
236                          */
237                         if (tcp_mask->hdr.sent_seq ||
238                                 tcp_mask->hdr.recv_ack ||
239                                 tcp_mask->hdr.data_off ||
240                                 tcp_mask->hdr.rx_win ||
241                                 tcp_mask->hdr.cksum ||
242                                 tcp_mask->hdr.tcp_urp) {
243                                 memset(filter, 0,
244                                         sizeof(struct rte_eth_ntuple_filter));
245                                 rte_flow_error_set(error, EINVAL,
246                                         RTE_FLOW_ERROR_TYPE_ITEM,
247                                         item, "Not supported by ntuple filter");
248                                 return -rte_errno;
249                         }
250
251                         filter->dst_port_mask  = tcp_mask->hdr.dst_port;
252                         filter->src_port_mask  = tcp_mask->hdr.src_port;
253                         if (tcp_mask->hdr.tcp_flags == 0xFF) {
254                                 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
255                         } else if (!tcp_mask->hdr.tcp_flags) {
256                                 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
257                         } else {
258                                 memset(filter, 0,
259                                         sizeof(struct rte_eth_ntuple_filter));
260                                 rte_flow_error_set(error, EINVAL,
261                                         RTE_FLOW_ERROR_TYPE_ITEM,
262                                         item, "Not supported by ntuple filter");
263                                 return -rte_errno;
264                         }
265
266                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
267                         filter->dst_port  = tcp_spec->hdr.dst_port;
268                         filter->src_port  = tcp_spec->hdr.src_port;
269                         filter->tcp_flags = tcp_spec->hdr.tcp_flags;
270                 }
271         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
272                 if (item->spec && item->mask) {
273                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
274
275                         /**
276                          * Only support src & dst ports,
277                          * others should be masked.
278                          */
279                         if (udp_mask->hdr.dgram_len ||
280                             udp_mask->hdr.dgram_cksum) {
281                                 memset(filter, 0,
282                                         sizeof(struct rte_eth_ntuple_filter));
283                                 rte_flow_error_set(error, EINVAL,
284                                         RTE_FLOW_ERROR_TYPE_ITEM,
285                                         item, "Not supported by ntuple filter");
286                                 return -rte_errno;
287                         }
288
289                         filter->dst_port_mask = udp_mask->hdr.dst_port;
290                         filter->src_port_mask = udp_mask->hdr.src_port;
291
292                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
293                         filter->dst_port = udp_spec->hdr.dst_port;
294                         filter->src_port = udp_spec->hdr.src_port;
295                 }
296         } else {
297                 if (item->spec && item->mask) {
298                         sctp_mask = (const struct rte_flow_item_sctp *)
299                                         item->mask;
300
301                         /**
302                          * Only support src & dst ports,
303                          * others should be masked.
304                          */
305                         if (sctp_mask->hdr.tag ||
306                             sctp_mask->hdr.cksum) {
307                                 memset(filter, 0,
308                                         sizeof(struct rte_eth_ntuple_filter));
309                                 rte_flow_error_set(error, EINVAL,
310                                         RTE_FLOW_ERROR_TYPE_ITEM,
311                                         item, "Not supported by ntuple filter");
312                                 return -rte_errno;
313                         }
314
315                         filter->dst_port_mask = sctp_mask->hdr.dst_port;
316                         filter->src_port_mask = sctp_mask->hdr.src_port;
317
318                         sctp_spec = (const struct rte_flow_item_sctp *)
319                                         item->spec;
320                         filter->dst_port = sctp_spec->hdr.dst_port;
321                         filter->src_port = sctp_spec->hdr.src_port;
322                 }
323         }
324         /* check if the next not void item is END */
325         index++;
326         NEXT_ITEM_OF_PATTERN(item, pattern, index);
327         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
328                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
329                 rte_flow_error_set(error, EINVAL,
330                         RTE_FLOW_ERROR_TYPE_ITEM,
331                         item, "Not supported by ntuple filter");
332                 return -rte_errno;
333         }
334
335         /* parse action */
336         index = 0;
337
338         /**
339          * n-tuple only supports forwarding,
340          * check if the first not void action is QUEUE.
341          */
342         NEXT_ITEM_OF_ACTION(act, actions, index);
343         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
344                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
345                 rte_flow_error_set(error, EINVAL,
346                         RTE_FLOW_ERROR_TYPE_ACTION,
347                         act, "Not supported action.");
348                 return -rte_errno;
349         }
350         filter->queue =
351                 ((const struct rte_flow_action_queue *)act->conf)->index;
352
353         /* check if the next not void item is END */
354         index++;
355         NEXT_ITEM_OF_ACTION(act, actions, index);
356         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
357                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
358                 rte_flow_error_set(error, EINVAL,
359                         RTE_FLOW_ERROR_TYPE_ACTION,
360                         act, "Not supported action.");
361                 return -rte_errno;
362         }
363
364         /* parse attr */
365         /* must be input direction */
366         if (!attr->ingress) {
367                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368                 rte_flow_error_set(error, EINVAL,
369                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
370                                    attr, "Only support ingress.");
371                 return -rte_errno;
372         }
373
374         /* not supported */
375         if (attr->egress) {
376                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
377                 rte_flow_error_set(error, EINVAL,
378                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
379                                    attr, "Not support egress.");
380                 return -rte_errno;
381         }
382
383         if (attr->priority > 0xFFFF) {
384                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
385                 rte_flow_error_set(error, EINVAL,
386                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
387                                    attr, "Error priority.");
388                 return -rte_errno;
389         }
390         filter->priority = (uint16_t)attr->priority;
391
392         return 0;
393 }
394
395 /* An igb-specific wrapper, because the filter flags are igb-specific. */
396 static int
397 igb_parse_ntuple_filter(struct rte_eth_dev *dev,
398                           const struct rte_flow_attr *attr,
399                           const struct rte_flow_item pattern[],
400                           const struct rte_flow_action actions[],
401                           struct rte_eth_ntuple_filter *filter,
402                           struct rte_flow_error *error)
403 {
404         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
405         int ret;
406
407         MAC_TYPE_FILTER_SUP(hw->mac.type);
408
409         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
410
411         if (ret)
412                 return ret;
413
414         /* Igb doesn't support many priorities. */
415         if (filter->priority > E1000_2TUPLE_MAX_PRI) {
416                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
417                 rte_flow_error_set(error, EINVAL,
418                         RTE_FLOW_ERROR_TYPE_ITEM,
419                         NULL, "Priority not supported by ntuple filter");
420                 return -rte_errno;
421         }
422
423         if (hw->mac.type == e1000_82576) {
424                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
425                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
426                         rte_flow_error_set(error, EINVAL,
427                                 RTE_FLOW_ERROR_TYPE_ITEM,
428                                 NULL, "queue number not "
429                                 "supported by ntuple filter");
430                         return -rte_errno;
431                 }
432                 filter->flags |= RTE_5TUPLE_FLAGS;
433         } else {
434                 if (filter->src_ip_mask || filter->dst_ip_mask ||
435                         filter->src_port_mask) {
436                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
437                         rte_flow_error_set(error, EINVAL,
438                                 RTE_FLOW_ERROR_TYPE_ITEM,
439                                 NULL, "only two tuple are "
440                                 "supported by this filter");
441                         return -rte_errno;
442                 }
443                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
444                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
445                         rte_flow_error_set(error, EINVAL,
446                                 RTE_FLOW_ERROR_TYPE_ITEM,
447                                 NULL, "queue number not "
448                                 "supported by ntuple filter");
449                         return -rte_errno;
450                 }
451                 filter->flags |= RTE_2TUPLE_FLAGS;
452         }
453
454         return 0;
455 }
456
457 /**
458  * Parse the rule to see if it is an ethertype rule,
459  * and extract the ethertype filter info along the way.
460  * pattern:
461  * The first not void item must be ETH.
462  * The next not void item must be END.
463  * action:
464  * The first not void action should be QUEUE.
465  * The next not void action should be END.
466  * pattern example:
467  * ITEM         Spec                    Mask
468  * ETH          type    0x0807          0xFFFF
469  * END
470  * other members in mask and spec should be set to 0x00.
471  * item->last should be NULL.
472  */
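/*
 * Illustrative sketch only (not part of the driver): an ethertype rule
 * matching the example above; 0x0807 and the queue index are arbitrary
 * example values.
 *
 *        struct rte_flow_item_eth eth_spec = {
 *                .type = rte_cpu_to_be_16(0x0807),
 *        };
 *        struct rte_flow_item_eth eth_mask = {
 *                .type = 0xFFFF,
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *                  .spec = &eth_spec, .mask = &eth_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue_act = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */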
473 static int
474 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
475                             const struct rte_flow_item *pattern,
476                             const struct rte_flow_action *actions,
477                             struct rte_eth_ethertype_filter *filter,
478                             struct rte_flow_error *error)
479 {
480         const struct rte_flow_item *item;
481         const struct rte_flow_action *act;
482         const struct rte_flow_item_eth *eth_spec;
483         const struct rte_flow_item_eth *eth_mask;
484         const struct rte_flow_action_queue *act_q;
485         uint32_t index;
486
487         if (!pattern) {
488                 rte_flow_error_set(error, EINVAL,
489                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
490                                 NULL, "NULL pattern.");
491                 return -rte_errno;
492         }
493
494         if (!actions) {
495                 rte_flow_error_set(error, EINVAL,
496                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
497                                 NULL, "NULL action.");
498                 return -rte_errno;
499         }
500
501         if (!attr) {
502                 rte_flow_error_set(error, EINVAL,
503                                    RTE_FLOW_ERROR_TYPE_ATTR,
504                                    NULL, "NULL attribute.");
505                 return -rte_errno;
506         }
507
508         /* Parse pattern */
509         index = 0;
510
511         /* The first non-void item should be MAC. */
512         NEXT_ITEM_OF_PATTERN(item, pattern, index);
513         if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
514                 rte_flow_error_set(error, EINVAL,
515                         RTE_FLOW_ERROR_TYPE_ITEM,
516                         item, "Not supported by ethertype filter");
517                 return -rte_errno;
518         }
519
520         /*Not supported last point for range*/
521         if (item->last) {
522                 rte_flow_error_set(error, EINVAL,
523                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
524                         item, "Not supported last point for range");
525                 return -rte_errno;
526         }
527
528         /* Get the MAC info. */
529         if (!item->spec || !item->mask) {
530                 rte_flow_error_set(error, EINVAL,
531                                 RTE_FLOW_ERROR_TYPE_ITEM,
532                                 item, "Not supported by ethertype filter");
533                 return -rte_errno;
534         }
535
536         eth_spec = (const struct rte_flow_item_eth *)item->spec;
537         eth_mask = (const struct rte_flow_item_eth *)item->mask;
538
539         /* Mask bits of source MAC address must be full of 0.
540          * Mask bits of destination MAC address must be full
541          * of 1 or full of 0.
542          */
543         if (!is_zero_ether_addr(&eth_mask->src) ||
544             (!is_zero_ether_addr(&eth_mask->dst) &&
545              !is_broadcast_ether_addr(&eth_mask->dst))) {
546                 rte_flow_error_set(error, EINVAL,
547                                 RTE_FLOW_ERROR_TYPE_ITEM,
548                                 item, "Invalid ether address mask");
549                 return -rte_errno;
550         }
551
552         if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
553                 rte_flow_error_set(error, EINVAL,
554                                 RTE_FLOW_ERROR_TYPE_ITEM,
555                                 item, "Invalid ethertype mask");
556                 return -rte_errno;
557         }
558
559         /* If mask bits of destination MAC address
560          * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
561          */
562         if (is_broadcast_ether_addr(&eth_mask->dst)) {
563                 filter->mac_addr = eth_spec->dst;
564                 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
565         } else {
566                 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
567         }
568         filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
569
570         /* Check if the next non-void item is END. */
571         index++;
572         NEXT_ITEM_OF_PATTERN(item, pattern, index);
573         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
574                 rte_flow_error_set(error, EINVAL,
575                                 RTE_FLOW_ERROR_TYPE_ITEM,
576                                 item, "Not supported by ethertype filter.");
577                 return -rte_errno;
578         }
579
580         /* Parse action */
581
582         index = 0;
583         /* Check if the first non-void action is QUEUE or DROP. */
584         NEXT_ITEM_OF_ACTION(act, actions, index);
585         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
586             act->type != RTE_FLOW_ACTION_TYPE_DROP) {
587                 rte_flow_error_set(error, EINVAL,
588                                 RTE_FLOW_ERROR_TYPE_ACTION,
589                                 act, "Not supported action.");
590                 return -rte_errno;
591         }
592
593         if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
594                 act_q = (const struct rte_flow_action_queue *)act->conf;
595                 filter->queue = act_q->index;
596         } else {
597                 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
598         }
599
600         /* Check if the next non-void item is END */
601         index++;
602         NEXT_ITEM_OF_ACTION(act, actions, index);
603         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
604                 rte_flow_error_set(error, EINVAL,
605                                 RTE_FLOW_ERROR_TYPE_ACTION,
606                                 act, "Not supported action.");
607                 return -rte_errno;
608         }
609
610         /* Parse attr */
611         /* Must be input direction */
612         if (!attr->ingress) {
613                 rte_flow_error_set(error, EINVAL,
614                                 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
615                                 attr, "Only support ingress.");
616                 return -rte_errno;
617         }
618
619         /* Not supported */
620         if (attr->egress) {
621                 rte_flow_error_set(error, EINVAL,
622                                 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
623                                 attr, "Not support egress.");
624                 return -rte_errno;
625         }
626
627         /* Not supported */
628         if (attr->priority) {
629                 rte_flow_error_set(error, EINVAL,
630                                 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
631                                 attr, "Not support priority.");
632                 return -rte_errno;
633         }
634
635         /* Not supported */
636         if (attr->group) {
637                 rte_flow_error_set(error, EINVAL,
638                                 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
639                                 attr, "Not support group.");
640                 return -rte_errno;
641         }
642
643         return 0;
644 }
645
646 static int
647 igb_parse_ethertype_filter(struct rte_eth_dev *dev,
648                                  const struct rte_flow_attr *attr,
649                              const struct rte_flow_item pattern[],
650                              const struct rte_flow_action actions[],
651                              struct rte_eth_ethertype_filter *filter,
652                              struct rte_flow_error *error)
653 {
654         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
655         int ret;
656
657         MAC_TYPE_FILTER_SUP(hw->mac.type);
658
659         ret = cons_parse_ethertype_filter(attr, pattern,
660                                         actions, filter, error);
661
662         if (ret)
663                 return ret;
664
665         if (hw->mac.type == e1000_82576) {
666                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
667                         memset(filter, 0, sizeof(
668                                         struct rte_eth_ethertype_filter));
669                         rte_flow_error_set(error, EINVAL,
670                                 RTE_FLOW_ERROR_TYPE_ITEM,
671                                 NULL, "queue number not supported "
672                                         "by ethertype filter");
673                         return -rte_errno;
674                 }
675         } else {
676                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
677                         memset(filter, 0, sizeof(
678                                         struct rte_eth_ethertype_filter));
679                         rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 NULL, "queue number not supported "
682                                         "by ethertype filter");
683                         return -rte_errno;
684                 }
685         }
686
687         if (filter->ether_type == ETHER_TYPE_IPv4 ||
688                 filter->ether_type == ETHER_TYPE_IPv6) {
689                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
690                 rte_flow_error_set(error, EINVAL,
691                         RTE_FLOW_ERROR_TYPE_ITEM,
692                         NULL, "IPv4/IPv6 not supported by ethertype filter");
693                 return -rte_errno;
694         }
695
696         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
697                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
698                 rte_flow_error_set(error, EINVAL,
699                         RTE_FLOW_ERROR_TYPE_ITEM,
700                         NULL, "mac compare is unsupported");
701                 return -rte_errno;
702         }
703
704         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
705                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
706                 rte_flow_error_set(error, EINVAL,
707                         RTE_FLOW_ERROR_TYPE_ITEM,
708                         NULL, "drop option is unsupported");
709                 return -rte_errno;
710         }
711
712         return 0;
713 }
714
715 /**
716  * Parse the rule to see if it is a TCP SYN rule,
717  * and extract the TCP SYN filter info along the way.
718  * pattern:
719  * The first not void item must be ETH.
720  * The second not void item must be IPV4 or IPV6.
721  * The third not void item must be TCP.
722  * The next not void item must be END.
723  * action:
724  * The first not void action should be QUEUE.
725  * The next not void action should be END.
726  * pattern example:
727  * ITEM         Spec                    Mask
728  * ETH          NULL                    NULL
729  * IPV4/IPV6    NULL                    NULL
730  * TCP          tcp_flags       0x02    0x02
731  * END
732  * other members in mask and spec should be set to 0x00.
733  * item->last should be NULL.
734  */
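/*
 * Illustrative sketch only (not part of the driver): a TCP SYN rule
 * matching the example above (the queue index is a made-up value).
 * Note the parser below requires the tcp_flags mask to be exactly
 * TCP_SYN_FLAG.
 *
 *        struct rte_flow_item_tcp tcp_spec = {
 *                .hdr.tcp_flags = TCP_SYN_FLAG,
 *        };
 *        struct rte_flow_item_tcp tcp_mask = {
 *                .hdr.tcp_flags = TCP_SYN_FLAG,
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *                { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *                  .spec = &tcp_spec, .mask = &tcp_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 *        struct rte_flow_action_queue queue_act = { .index = 1 };
 *        struct rte_flow_action actions[] = {
 *                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *                { .type = RTE_FLOW_ACTION_TYPE_END },
 *        };
 */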
735 static int
736 cons_parse_syn_filter(const struct rte_flow_attr *attr,
737                                 const struct rte_flow_item pattern[],
738                                 const struct rte_flow_action actions[],
739                                 struct rte_eth_syn_filter *filter,
740                                 struct rte_flow_error *error)
741 {
742         const struct rte_flow_item *item;
743         const struct rte_flow_action *act;
744         const struct rte_flow_item_tcp *tcp_spec;
745         const struct rte_flow_item_tcp *tcp_mask;
746         const struct rte_flow_action_queue *act_q;
747         uint32_t index;
748
749         if (!pattern) {
750                 rte_flow_error_set(error, EINVAL,
751                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
752                                 NULL, "NULL pattern.");
753                 return -rte_errno;
754         }
755
756         if (!actions) {
757                 rte_flow_error_set(error, EINVAL,
758                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
759                                 NULL, "NULL action.");
760                 return -rte_errno;
761         }
762
763         if (!attr) {
764                 rte_flow_error_set(error, EINVAL,
765                                    RTE_FLOW_ERROR_TYPE_ATTR,
766                                    NULL, "NULL attribute.");
767                 return -rte_errno;
768         }
769
770         /* parse pattern */
771         index = 0;
772
773         /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
774         NEXT_ITEM_OF_PATTERN(item, pattern, index);
775         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
776             item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
777             item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
778             item->type != RTE_FLOW_ITEM_TYPE_TCP) {
779                 rte_flow_error_set(error, EINVAL,
780                                 RTE_FLOW_ERROR_TYPE_ITEM,
781                                 item, "Not supported by syn filter");
782                 return -rte_errno;
783         }
784         /*Not supported last point for range*/
785         if (item->last) {
786                 rte_flow_error_set(error, EINVAL,
787                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
788                         item, "Not supported last point for range");
789                 return -rte_errno;
790         }
791
792         /* Skip Ethernet */
793         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
794                 /* if the item is MAC, the content should be NULL */
795                 if (item->spec || item->mask) {
796                         rte_flow_error_set(error, EINVAL,
797                                 RTE_FLOW_ERROR_TYPE_ITEM,
798                                 item, "Invalid SYN address mask");
799                         return -rte_errno;
800                 }
801
802                 /* check if the next not void item is IPv4 or IPv6 */
803                 index++;
804                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
805                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
806                     item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
807                         rte_flow_error_set(error, EINVAL,
808                                 RTE_FLOW_ERROR_TYPE_ITEM,
809                                 item, "Not supported by syn filter");
810                         return -rte_errno;
811                 }
812         }
813
814         /* Skip IP */
815         if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
816             item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
817                 /* if the item is IP, the content should be NULL */
818                 if (item->spec || item->mask) {
819                         rte_flow_error_set(error, EINVAL,
820                                 RTE_FLOW_ERROR_TYPE_ITEM,
821                                 item, "Invalid SYN mask");
822                         return -rte_errno;
823                 }
824
825                 /* check if the next not void item is TCP */
826                 index++;
827                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
828                 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
829                         rte_flow_error_set(error, EINVAL,
830                                 RTE_FLOW_ERROR_TYPE_ITEM,
831                                 item, "Not supported by syn filter");
832                         return -rte_errno;
833                 }
834         }
835
836         /* Get the TCP info. Only support SYN. */
837         if (!item->spec || !item->mask) {
838                 rte_flow_error_set(error, EINVAL,
839                                 RTE_FLOW_ERROR_TYPE_ITEM,
840                                 item, "Invalid SYN mask");
841                 return -rte_errno;
842         }
843         /*Not supported last point for range*/
844         if (item->last) {
845                 rte_flow_error_set(error, EINVAL,
846                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
847                         item, "Not supported last point for range");
848                 return -rte_errno;
849         }
850
851         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
852         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
853         if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
854             tcp_mask->hdr.src_port ||
855             tcp_mask->hdr.dst_port ||
856             tcp_mask->hdr.sent_seq ||
857             tcp_mask->hdr.recv_ack ||
858             tcp_mask->hdr.data_off ||
859             tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
860             tcp_mask->hdr.rx_win ||
861             tcp_mask->hdr.cksum ||
862             tcp_mask->hdr.tcp_urp) {
863                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
864                 rte_flow_error_set(error, EINVAL,
865                                 RTE_FLOW_ERROR_TYPE_ITEM,
866                                 item, "Not supported by syn filter");
867                 return -rte_errno;
868         }
869
870         /* check if the next not void item is END */
871         index++;
872         NEXT_ITEM_OF_PATTERN(item, pattern, index);
873         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
874                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
875                 rte_flow_error_set(error, EINVAL,
876                                 RTE_FLOW_ERROR_TYPE_ITEM,
877                                 item, "Not supported by syn filter");
878                 return -rte_errno;
879         }
880
881         /* parse action */
882         index = 0;
883
884         /* check if the first not void action is QUEUE. */
885         NEXT_ITEM_OF_ACTION(act, actions, index);
886         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
887                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
888                 rte_flow_error_set(error, EINVAL,
889                                 RTE_FLOW_ERROR_TYPE_ACTION,
890                                 act, "Not supported action.");
891                 return -rte_errno;
892         }
893
894         act_q = (const struct rte_flow_action_queue *)act->conf;
895         filter->queue = act_q->index;
896
897         /* check if the next not void item is END */
898         index++;
899         NEXT_ITEM_OF_ACTION(act, actions, index);
900         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
901                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
902                 rte_flow_error_set(error, EINVAL,
903                                 RTE_FLOW_ERROR_TYPE_ACTION,
904                                 act, "Not supported action.");
905                 return -rte_errno;
906         }
907
908         /* parse attr */
909         /* must be input direction */
910         if (!attr->ingress) {
911                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
912                 rte_flow_error_set(error, EINVAL,
913                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
914                         attr, "Only support ingress.");
915                 return -rte_errno;
916         }
917
918         /* not supported */
919         if (attr->egress) {
920                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
921                 rte_flow_error_set(error, EINVAL,
922                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
923                         attr, "Not support egress.");
924                 return -rte_errno;
925         }
926
927         /* Support 2 priorities, the lowest or highest. */
928         if (!attr->priority) {
929                 filter->hig_pri = 0;
930         } else if (attr->priority == (uint32_t)~0U) {
931                 filter->hig_pri = 1;
932         } else {
933                 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
934                 rte_flow_error_set(error, EINVAL,
935                         RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
936                         attr, "Not support priority.");
937                 return -rte_errno;
938         }
939
940         return 0;
941 }
942
943 static int
944 igb_parse_syn_filter(struct rte_eth_dev *dev,
945                                  const struct rte_flow_attr *attr,
946                              const struct rte_flow_item pattern[],
947                              const struct rte_flow_action actions[],
948                              struct rte_eth_syn_filter *filter,
949                              struct rte_flow_error *error)
950 {
951         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
952         int ret;
953
954         MAC_TYPE_FILTER_SUP(hw->mac.type);
955
956         ret = cons_parse_syn_filter(attr, pattern,
957                                         actions, filter, error);
958
959         if (hw->mac.type == e1000_82576) {
960                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
961                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
962                         rte_flow_error_set(error, EINVAL,
963                                 RTE_FLOW_ERROR_TYPE_ITEM,
964                                 NULL, "queue number not "
965                                         "supported by syn filter");
966                         return -rte_errno;
967                 }
968         } else {
969                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
970                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
971                         rte_flow_error_set(error, EINVAL,
972                                 RTE_FLOW_ERROR_TYPE_ITEM,
973                                 NULL, "queue number not "
974                                         "supported by syn filter");
975                         return -rte_errno;
976                 }
977         }
978
979         if (ret)
980                 return ret;
981
982         return 0;
983 }
984
985 /**
986  * Parse the rule to see if it is a flex byte rule,
987  * and extract the flex byte filter info along the way.
988  * pattern:
989  * The first not void item must be RAW.
990  * The second not void item can be RAW or END.
991  * The third not void item can be RAW or END.
992  * The last not void item must be END.
993  * action:
994  * The first not void action should be QUEUE.
995  * The next not void action should be END.
996  * pattern example:
997  * ITEM         Spec                            Mask
998  * RAW          relative        0               0x1
999  *              offset          0               0xFFFFFFFF
1000  *              pattern {0x08, 0x06}            {0xFF, 0xFF}
1001  * RAW          relative        1               0x1
1002  *              offset          100             0xFFFFFFFF
1003  *              pattern {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
1004  * END
1005  * other members in mask and spec should be set to 0x00.
1006  * item->last should be NULL.
1007  */
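/*
 * Illustrative sketch only (not part of the driver): one way an
 * application could build the first RAW item of the example above.
 * This assumes a DPDK version where rte_flow_item_raw carries a
 * 'pattern' pointer; in releases where 'pattern' is a flexible array
 * member the item has to be assembled in a dynamically sized buffer
 * instead.
 *
 *        static const uint8_t spec_bytes[] = { 0x08, 0x06 };
 *        static const uint8_t mask_bytes[] = { 0xFF, 0xFF };
 *        struct rte_flow_item_raw raw_spec = {
 *                .relative = 0,
 *                .offset = 0,
 *                .length = 2,
 *                .pattern = spec_bytes,
 *        };
 *        struct rte_flow_item_raw raw_mask = {
 *                .relative = 1,
 *                .offset = 0xFFFFFFFF,
 *                .length = 0xFFFF,
 *                .pattern = mask_bytes,
 *        };
 *        struct rte_flow_item pattern[] = {
 *                { .type = RTE_FLOW_ITEM_TYPE_RAW,
 *                  .spec = &raw_spec, .mask = &raw_mask },
 *                { .type = RTE_FLOW_ITEM_TYPE_END },
 *        };
 */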
1008 static int
1009 cons_parse_flex_filter(const struct rte_flow_attr *attr,
1010                                 const struct rte_flow_item pattern[],
1011                                 const struct rte_flow_action actions[],
1012                                 struct rte_eth_flex_filter *filter,
1013                                 struct rte_flow_error *error)
1014 {
1015         const struct rte_flow_item *item;
1016         const struct rte_flow_action *act;
1017         const struct rte_flow_item_raw *raw_spec;
1018         const struct rte_flow_item_raw *raw_mask;
1019         const struct rte_flow_action_queue *act_q;
1020         uint32_t index, i, offset, total_offset;
1021         uint32_t max_offset = 0;
1022         int32_t shift, j, raw_index = 0;
1023         int32_t relative[IGB_FLEX_RAW_NUM] = {0};
1024         int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
1025
1026         if (!pattern) {
1027                 rte_flow_error_set(error, EINVAL,
1028                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1029                                 NULL, "NULL pattern.");
1030                 return -rte_errno;
1031         }
1032
1033         if (!actions) {
1034                 rte_flow_error_set(error, EINVAL,
1035                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1036                                 NULL, "NULL action.");
1037                 return -rte_errno;
1038         }
1039
1040         if (!attr) {
1041                 rte_flow_error_set(error, EINVAL,
1042                                    RTE_FLOW_ERROR_TYPE_ATTR,
1043                                    NULL, "NULL attribute.");
1044                 return -rte_errno;
1045         }
1046
1047         /* parse pattern */
1048         index = 0;
1049
1050 item_loop:
1051
1052         /* the first not void item should be RAW */
1053         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1054         if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1055                 rte_flow_error_set(error, EINVAL,
1056                                 RTE_FLOW_ERROR_TYPE_ITEM,
1057                                 item, "Not supported by flex filter");
1058                 return -rte_errno;
1059         }
1060         /*Not supported last point for range*/
1061         if (item->last) {
1062                 rte_flow_error_set(error, EINVAL,
1063                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1064                         item, "Not supported last point for range");
1065                 return -rte_errno;
1066         }
1067
1068         raw_spec = (const struct rte_flow_item_raw *)item->spec;
1069         raw_mask = (const struct rte_flow_item_raw *)item->mask;
1070
1071         if (!raw_mask->length ||
1072             !raw_mask->relative) {
1073                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1074                 rte_flow_error_set(error, EINVAL,
1075                                 RTE_FLOW_ERROR_TYPE_ITEM,
1076                                 item, "Not supported by flex filter");
1077                 return -rte_errno;
1078         }
1079
1080         if (raw_mask->offset)
1081                 offset = raw_spec->offset;
1082         else
1083                 offset = 0;
1084
1085         for (j = 0; j < raw_spec->length; j++) {
1086                 if (raw_mask->pattern[j] != 0xFF) {
1087                         memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1088                         rte_flow_error_set(error, EINVAL,
1089                                         RTE_FLOW_ERROR_TYPE_ITEM,
1090                                         item, "Not supported by flex filter");
1091                         return -rte_errno;
1092                 }
1093         }
1094
1095         total_offset = 0;
1096
1097         if (raw_spec->relative) {
1098                 for (j = raw_index; j > 0; j--) {
1099                         total_offset += raw_offset[j - 1];
1100                         if (!relative[j - 1])
1101                                 break;
1102                 }
1103                 if (total_offset + raw_spec->length + offset > max_offset)
1104                         max_offset = total_offset + raw_spec->length + offset;
1105         } else {
1106                 if (raw_spec->length + offset > max_offset)
1107                         max_offset = raw_spec->length + offset;
1108         }
1109
1110         if ((raw_spec->length + offset + total_offset) >
1111                         RTE_FLEX_FILTER_MAXLEN) {
1112                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1113                 rte_flow_error_set(error, EINVAL,
1114                                 RTE_FLOW_ERROR_TYPE_ITEM,
1115                                 item, "Not supported by flex filter");
1116                 return -rte_errno;
1117         }
1118
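        /*
         * A worked example of the mapping below (values are illustrative):
         * with relative == 0, offset = 10 and length = 3, bytes 10..12 of
         * the packet are matched, so j starts at 10 / CHAR_BIT = 1 and
         * shift at 10 % CHAR_BIT = 2, and the loop further down sets
         * filter->mask[1] |= 0x20 | 0x10 | 0x08 (one bit per matched byte,
         * most-significant bit first).
         */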
1119         if (raw_spec->relative == 0) {
1120                 for (j = 0; j < raw_spec->length; j++)
1121                         filter->bytes[offset + j] =
1122                         raw_spec->pattern[j];
1123                 j = offset / CHAR_BIT;
1124                 shift = offset % CHAR_BIT;
1125         } else {
1126                 for (j = 0; j < raw_spec->length; j++)
1127                         filter->bytes[total_offset + offset + j] =
1128                                 raw_spec->pattern[j];
1129                 j = (total_offset + offset) / CHAR_BIT;
1130                 shift = (total_offset + offset) % CHAR_BIT;
1131         }
1132
1133         i = 0;
1134
1135         for ( ; shift < CHAR_BIT; shift++) {
1136                 filter->mask[j] |= (0x80 >> shift);
1137                 i++;
1138                 if (i == raw_spec->length)
1139                         break;
1140                 if (shift == (CHAR_BIT - 1)) {
1141                         j++;
1142                         shift = -1;
1143                 }
1144         }
1145
1146         relative[raw_index] = raw_spec->relative;
1147         raw_offset[raw_index] = offset + raw_spec->length;
1148         raw_index++;
1149
1150         /* check if the next not void item is RAW */
1151         index++;
1152         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1153         if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1154                 item->type != RTE_FLOW_ITEM_TYPE_END) {
1155                 rte_flow_error_set(error, EINVAL,
1156                                 RTE_FLOW_ERROR_TYPE_ITEM,
1157                                 item, "Not supported by flex filter");
1158                 return -rte_errno;
1159         }
1160
1161         /* go back to parser */
1162         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1163                 /* if the item is RAW, the content should be parsed */
1164                 goto item_loop;
1165         }
1166
1167         filter->len = RTE_ALIGN(max_offset, 8);
1168
1169         /* parse action */
1170         index = 0;
1171
1172         /* check if the first not void action is QUEUE. */
1173         NEXT_ITEM_OF_ACTION(act, actions, index);
1174         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1175                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1176                 rte_flow_error_set(error, EINVAL,
1177                                 RTE_FLOW_ERROR_TYPE_ACTION,
1178                                 act, "Not supported action.");
1179                 return -rte_errno;
1180         }
1181
1182         act_q = (const struct rte_flow_action_queue *)act->conf;
1183         filter->queue = act_q->index;
1184
1185         /* check if the next not void item is END */
1186         index++;
1187         NEXT_ITEM_OF_ACTION(act, actions, index);
1188         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1189                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1190                 rte_flow_error_set(error, EINVAL,
1191                                 RTE_FLOW_ERROR_TYPE_ACTION,
1192                                 act, "Not supported action.");
1193                 return -rte_errno;
1194         }
1195
1196         /* parse attr */
1197         /* must be input direction */
1198         if (!attr->ingress) {
1199                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1200                 rte_flow_error_set(error, EINVAL,
1201                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1202                         attr, "Only support ingress.");
1203                 return -rte_errno;
1204         }
1205
1206         /* not supported */
1207         if (attr->egress) {
1208                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1209                 rte_flow_error_set(error, EINVAL,
1210                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1211                         attr, "Not support egress.");
1212                 return -rte_errno;
1213         }
1214
1215         if (attr->priority > 0xFFFF) {
1216                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1217                 rte_flow_error_set(error, EINVAL,
1218                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1219                                    attr, "Error priority.");
1220                 return -rte_errno;
1221         }
1222
1223         filter->priority = (uint16_t)attr->priority;
1224
1225         return 0;
1226 }
1227
1228 static int
1229 igb_parse_flex_filter(struct rte_eth_dev *dev,
1230                                  const struct rte_flow_attr *attr,
1231                              const struct rte_flow_item pattern[],
1232                              const struct rte_flow_action actions[],
1233                              struct rte_eth_flex_filter *filter,
1234                              struct rte_flow_error *error)
1235 {
1236         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1237         int ret;
1238
1239         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1240
1241         ret = cons_parse_flex_filter(attr, pattern,
1242                                         actions, filter, error);
1243
1244         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1245                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1246                 rte_flow_error_set(error, EINVAL,
1247                         RTE_FLOW_ERROR_TYPE_ITEM,
1248                         NULL, "queue number not supported by flex filter");
1249                 return -rte_errno;
1250         }
1251
1252         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1253                 filter->len % sizeof(uint64_t) != 0) {
1254                 PMD_DRV_LOG(ERR, "filter's length is out of range");
1255                 return -EINVAL;
1256         }
1257
1258         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1259                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1260                 return -EINVAL;
1261         }
1262
1263         if (ret)
1264                 return ret;
1265
1266         return 0;
1267 }
1268
1269 static int
1270 igb_parse_rss_filter(struct rte_eth_dev *dev,
1271                         const struct rte_flow_attr *attr,
1272                         const struct rte_flow_action actions[],
1273                         struct igb_rte_flow_rss_conf *rss_conf,
1274                         struct rte_flow_error *error)
1275 {
1276         const struct rte_flow_action *act;
1277         const struct rte_flow_action_rss *rss;
1278         uint16_t n, index;
1279
1280         /**
1281          * rss only supports forwarding,
1282          * check if the first not void action is RSS.
1283          */
1284         index = 0;
1285         NEXT_ITEM_OF_ACTION(act, actions, index);
1286         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1287                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1288                 rte_flow_error_set(error, EINVAL,
1289                         RTE_FLOW_ERROR_TYPE_ACTION,
1290                         act, "Not supported action.");
1291                 return -rte_errno;
1292         }
1293
1294         rss = (const struct rte_flow_action_rss *)act->conf;
1295
1296         if (!rss || !rss->num) {
1297                 rte_flow_error_set(error, EINVAL,
1298                                 RTE_FLOW_ERROR_TYPE_ACTION,
1299                                 act,
1300                            "no valid queues");
1301                 return -rte_errno;
1302         }
1303
1304         for (n = 0; n < rss->num; n++) {
1305                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
1306                         rte_flow_error_set(error, EINVAL,
1307                                    RTE_FLOW_ERROR_TYPE_ACTION,
1308                                    act,
1309                                    "queue id > max number of queues");
1310                         return -rte_errno;
1311                 }
1312         }
1313
1314         if (rss->rss_conf)
1315                 rss_conf->rss_conf = *rss->rss_conf;
1316         else
1317                 rss_conf->rss_conf.rss_hf = IGB_RSS_OFFLOAD_ALL;
1318
1319         for (n = 0; n < rss->num; ++n)
1320                 rss_conf->queue[n] = rss->queue[n];
1321         rss_conf->num = rss->num;
1322
1323         /* check if the next not void action is END */
1324         index++;
1325         NEXT_ITEM_OF_ACTION(act, actions, index);
1326         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1327                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1328                 rte_flow_error_set(error, EINVAL,
1329                         RTE_FLOW_ERROR_TYPE_ACTION,
1330                         act, "Not supported action.");
1331                 return -rte_errno;
1332         }
1333
1334         /* parse attr */
1335         /* must be input direction */
1336         if (!attr->ingress) {
1337                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1338                 rte_flow_error_set(error, EINVAL,
1339                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1340                                    attr, "Only support ingress.");
1341                 return -rte_errno;
1342         }
1343
1344         /* not supported */
1345         if (attr->egress) {
1346                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1347                 rte_flow_error_set(error, EINVAL,
1348                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1349                                    attr, "Not support egress.");
1350                 return -rte_errno;
1351         }
1352
1353         if (attr->priority > 0xFFFF) {
1354                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1355                 rte_flow_error_set(error, EINVAL,
1356                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1357                                    attr, "Error priority.");
1358                 return -rte_errno;
1359         }
1360
1361         return 0;
1362 }
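
/*
 * A minimal sketch of the RSS action this parser accepts; buf and the
 * queue ids are illustrative values, and the flexible queue[] array must
 * be allocated by the caller:
 *
 *	uint8_t buf[sizeof(struct rte_flow_action_rss) + 2 * sizeof(uint16_t)];
 *	struct rte_flow_action_rss *rss = (struct rte_flow_action_rss *)buf;
 *
 *	rss->rss_conf = NULL;	(a NULL conf selects IGB_RSS_OFFLOAD_ALL above)
 *	rss->num = 2;
 *	rss->queue[0] = 0;
 *	rss->queue[1] = 1;
 *
 * Every queue id must be below dev->data->nb_rx_queues, the action list
 * must end with RTE_FLOW_ACTION_TYPE_END, and the attributes must be
 * ingress, not egress, with a priority no greater than 0xFFFF.
 */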
1363
1364 /**
1365  * Create a flow rule.
1366  * Theoretically one rule can match more than one filter type.
1367  * We let it use the first filter type it hits.
1368  * So, the sequence matters.
1369  */
1370 static struct rte_flow *
1371 igb_flow_create(struct rte_eth_dev *dev,
1372                   const struct rte_flow_attr *attr,
1373                   const struct rte_flow_item pattern[],
1374                   const struct rte_flow_action actions[],
1375                   struct rte_flow_error *error)
1376 {
1377         int ret;
1378         struct rte_eth_ntuple_filter ntuple_filter;
1379         struct rte_eth_ethertype_filter ethertype_filter;
1380         struct rte_eth_syn_filter syn_filter;
1381         struct rte_eth_flex_filter flex_filter;
1382         struct igb_rte_flow_rss_conf rss_conf;
1383         struct rte_flow *flow = NULL;
1384         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1385         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1386         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1387         struct igb_flex_filter_ele *flex_filter_ptr;
1388         struct igb_rss_conf_ele *rss_filter_ptr;
1389         struct igb_flow_mem *igb_flow_mem_ptr;
1390
1391         flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1392         if (!flow) {
1393                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1394                 return NULL;
1395         }
1396         igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1397                         sizeof(struct igb_flow_mem), 0);
1398         if (!igb_flow_mem_ptr) {
1399                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1400                 rte_free(flow);
1401                 return NULL;
1402         }
1403         igb_flow_mem_ptr->flow = flow;
1404         igb_flow_mem_ptr->dev = dev;
1405         TAILQ_INSERT_TAIL(&igb_flow_list,
1406                                 igb_flow_mem_ptr, entries);
1407
1408         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1409         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1410                         actions, &ntuple_filter, error);
1411         if (!ret) {
1412                 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1413                 if (!ret) {
1414                         ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1415                                 sizeof(struct igb_ntuple_filter_ele), 0);
1416                         if (!ntuple_filter_ptr) {
1417                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1418                                 goto out;
1419                         }
1420
1421                         rte_memcpy(&ntuple_filter_ptr->filter_info,
1422                                 &ntuple_filter,
1423                                 sizeof(struct rte_eth_ntuple_filter));
1424                         TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1425                                 ntuple_filter_ptr, entries);
1426                         flow->rule = ntuple_filter_ptr;
1427                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1428                         return flow;
1429                 }
1430                 goto out;
1431         }
1432
1433         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1434         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1435                                 actions, &ethertype_filter, error);
1436         if (!ret) {
1437                 ret = igb_add_del_ethertype_filter(dev,
1438                                 &ethertype_filter, TRUE);
1439                 if (!ret) {
1440                         ethertype_filter_ptr = rte_zmalloc(
1441                                 "igb_ethertype_filter",
1442                                 sizeof(struct igb_ethertype_filter_ele), 0);
1443                         if (!ethertype_filter_ptr) {
1444                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1445                                 goto out;
1446                         }
1447
1448                         rte_memcpy(&ethertype_filter_ptr->filter_info,
1449                                 &ethertype_filter,
1450                                 sizeof(struct rte_eth_ethertype_filter));
1451                         TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1452                                 ethertype_filter_ptr, entries);
1453                         flow->rule = ethertype_filter_ptr;
1454                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1455                         return flow;
1456                 }
1457                 goto out;
1458         }
1459
1460         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1461         ret = igb_parse_syn_filter(dev, attr, pattern,
1462                                 actions, &syn_filter, error);
1463         if (!ret) {
1464                 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1465                 if (!ret) {
1466                         syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1467                                 sizeof(struct igb_eth_syn_filter_ele), 0);
1468                         if (!syn_filter_ptr) {
1469                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1470                                 goto out;
1471                         }
1472
1473                         rte_memcpy(&syn_filter_ptr->filter_info,
1474                                 &syn_filter,
1475                                 sizeof(struct rte_eth_syn_filter));
1476                         TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1477                                 syn_filter_ptr,
1478                                 entries);
1479                         flow->rule = syn_filter_ptr;
1480                         flow->filter_type = RTE_ETH_FILTER_SYN;
1481                         return flow;
1482                 }
1483                 goto out;
1484         }
1485
1486         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1487         ret = igb_parse_flex_filter(dev, attr, pattern,
1488                                         actions, &flex_filter, error);
1489         if (!ret) {
1490                 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1491                 if (!ret) {
1492                         flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1493                                 sizeof(struct igb_flex_filter_ele), 0);
1494                         if (!flex_filter_ptr) {
1495                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1496                                 goto out;
1497                         }
1498
1499                         rte_memcpy(&flex_filter_ptr->filter_info,
1500                                 &flex_filter,
1501                                 sizeof(struct rte_eth_flex_filter));
1502                         TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1503                                 flex_filter_ptr, entries);
1504                         flow->rule = flex_filter_ptr;
1505                         flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1506                         return flow;
1507                 }
1508         }
1509
1510         memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1511         ret = igb_parse_rss_filter(dev, attr,
1512                                         actions, &rss_conf, error);
1513         if (!ret) {
1514                 ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
1515                 if (!ret) {
1516                         rss_filter_ptr = rte_zmalloc("igb_rss_filter",
1517                                 sizeof(struct igb_rss_conf_ele), 0);
1518                         if (!rss_filter_ptr) {
1519                                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1520                                 goto out;
1521                         }
1522                         rte_memcpy(&rss_filter_ptr->filter_info,
1523                                 &rss_conf,
1524                                 sizeof(struct igb_rte_flow_rss_conf));
1525                         TAILQ_INSERT_TAIL(&igb_filter_rss_list,
1526                                 rss_filter_ptr, entries);
1527                         flow->rule = rss_filter_ptr;
1528                         flow->filter_type = RTE_ETH_FILTER_HASH;
1529                         return flow;
1530                 }
1531         }
1532
1533 out:
1534         TAILQ_REMOVE(&igb_flow_list,
1535                 igb_flow_mem_ptr, entries);
1536         rte_flow_error_set(error, -ret,
1537                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1538                            "Failed to create flow.");
1539         rte_free(igb_flow_mem_ptr);
1540         rte_free(flow);
1541         return NULL;
1542 }
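
/*
 * A minimal application-side sketch of creating a flow on an igb port
 * (port_id, ip_spec/ip_mask and udp_spec/udp_mask are placeholder
 * variables). Because of the parsing order above, a QUEUE action with an
 * ETH/IPV4/UDP pattern is typically handled by the n-tuple path first, so
 * the later parsers are never consulted for it:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 *
 *	if (!flow)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "unknown");
 */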
1543
1544 /**
1545  * Check if the flow rule is supported by igb.
1546  * It only checks the format. It does not guarantee that the rule can be
1547  * programmed into the HW, because there may not be enough room for it.
1548  */
1549 static int
1550 igb_flow_validate(struct rte_eth_dev *dev,
1551                 const struct rte_flow_attr *attr,
1552                 const struct rte_flow_item pattern[],
1553                 const struct rte_flow_action actions[],
1554                 struct rte_flow_error *error)
1555 {
1556         struct rte_eth_ntuple_filter ntuple_filter;
1557         struct rte_eth_ethertype_filter ethertype_filter;
1558         struct rte_eth_syn_filter syn_filter;
1559         struct rte_eth_flex_filter flex_filter;
1560         struct igb_rte_flow_rss_conf rss_conf;
1561         int ret;
1562
1563         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1564         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1565                                 actions, &ntuple_filter, error);
1566         if (!ret)
1567                 return 0;
1568
1569         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1570         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1571                                 actions, &ethertype_filter, error);
1572         if (!ret)
1573                 return 0;
1574
1575         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1576         ret = igb_parse_syn_filter(dev, attr, pattern,
1577                                 actions, &syn_filter, error);
1578         if (!ret)
1579                 return 0;
1580
1581         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1582         ret = igb_parse_flex_filter(dev, attr, pattern,
1583                                 actions, &flex_filter, error);
1584         if (!ret)
1585                 return 0;
1586
1587         memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1588         ret = igb_parse_rss_filter(dev, attr,
1589                                         actions, &rss_conf, error);
1590
1591         return ret;
1592 }
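
/*
 * Applications usually validate before creating, e.g.:
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * A zero return here only means the rule is well formed; rte_flow_create()
 * can still fail if the matching filter table is already full.
 */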
1593
1594 /* Destroy a flow rule on igb. */
1595 static int
1596 igb_flow_destroy(struct rte_eth_dev *dev,
1597                 struct rte_flow *flow,
1598                 struct rte_flow_error *error)
1599 {
1600         int ret;
1601         struct rte_flow *pmd_flow = flow;
1602         enum rte_filter_type filter_type = pmd_flow->filter_type;
1603         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1604         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1605         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1606         struct igb_flex_filter_ele *flex_filter_ptr;
1607         struct igb_flow_mem *igb_flow_mem_ptr;
1608         struct igb_rss_conf_ele *rss_filter_ptr;
1609
1610         switch (filter_type) {
1611         case RTE_ETH_FILTER_NTUPLE:
1612                 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1613                                         pmd_flow->rule;
1614                 ret = igb_add_del_ntuple_filter(dev,
1615                                 &ntuple_filter_ptr->filter_info, FALSE);
1616                 if (!ret) {
1617                         TAILQ_REMOVE(&igb_filter_ntuple_list,
1618                                 ntuple_filter_ptr, entries);
1619                         rte_free(ntuple_filter_ptr);
1620                 }
1621                 break;
1622         case RTE_ETH_FILTER_ETHERTYPE:
1623                 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1624                                         pmd_flow->rule;
1625                 ret = igb_add_del_ethertype_filter(dev,
1626                                 &ethertype_filter_ptr->filter_info, FALSE);
1627                 if (!ret) {
1628                         TAILQ_REMOVE(&igb_filter_ethertype_list,
1629                                 ethertype_filter_ptr, entries);
1630                         rte_free(ethertype_filter_ptr);
1631                 }
1632                 break;
1633         case RTE_ETH_FILTER_SYN:
1634                 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1635                                 pmd_flow->rule;
1636                 ret = eth_igb_syn_filter_set(dev,
1637                                 &syn_filter_ptr->filter_info, FALSE);
1638                 if (!ret) {
1639                         TAILQ_REMOVE(&igb_filter_syn_list,
1640                                 syn_filter_ptr, entries);
1641                         rte_free(syn_filter_ptr);
1642                 }
1643                 break;
1644         case RTE_ETH_FILTER_FLEXIBLE:
1645                 flex_filter_ptr = (struct igb_flex_filter_ele *)
1646                                 pmd_flow->rule;
1647                 ret = eth_igb_add_del_flex_filter(dev,
1648                                 &flex_filter_ptr->filter_info, FALSE);
1649                 if (!ret) {
1650                         TAILQ_REMOVE(&igb_filter_flex_list,
1651                                 flex_filter_ptr, entries);
1652                         rte_free(flex_filter_ptr);
1653                 }
1654                 break;
1655         case RTE_ETH_FILTER_HASH:
1656                 rss_filter_ptr = (struct igb_rss_conf_ele *)
1657                                 pmd_flow->rule;
1658                 ret = igb_config_rss_filter(dev,
1659                                         &rss_filter_ptr->filter_info, FALSE);
1660                 if (!ret) {
1661                         TAILQ_REMOVE(&igb_filter_rss_list,
1662                                 rss_filter_ptr, entries);
1663                         rte_free(rss_filter_ptr);
1664                 }
1665                 break;
1666         default:
1667                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1668                             filter_type);
1669                 ret = -EINVAL;
1670                 break;
1671         }
1672
1673         if (ret) {
1674                 rte_flow_error_set(error, EINVAL,
1675                                 RTE_FLOW_ERROR_TYPE_HANDLE,
1676                                 NULL, "Failed to destroy flow");
1677                 return ret;
1678         }
1679
1680         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1681                 if (igb_flow_mem_ptr->flow == pmd_flow) {
1682                         TAILQ_REMOVE(&igb_flow_list,
1683                                 igb_flow_mem_ptr, entries);
1684                         rte_free(igb_flow_mem_ptr);
1685                 }
1686         }
1687         rte_free(flow);
1688
1689         return ret;
1690 }
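
/*
 * From the application side, e.g.:
 *
 *	if (rte_flow_destroy(port_id, flow, &err) == 0)
 *		flow = NULL;
 *
 * removes the rule from the hardware and frees both the per-filter list
 * entry and the rte_flow handle, so the handle must not be reused.
 */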
1691
1692 /* remove all the n-tuple filters */
1693 static void
1694 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1695 {
1696         struct e1000_filter_info *filter_info =
1697                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1698         struct e1000_5tuple_filter *p_5tuple;
1699         struct e1000_2tuple_filter *p_2tuple;
1700
1701         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1702                 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1703
1704         while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1705                 igb_delete_2tuple_filter(dev, p_2tuple);
1706 }
1707
1708 /* remove all the ether type filters */
1709 static void
1710 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1711 {
1712         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1713         struct e1000_filter_info *filter_info =
1714                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1715         int i;
1716
1717         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1718                 if (filter_info->ethertype_mask & (1 << i)) {
1719                         (void)igb_ethertype_filter_remove(filter_info,
1720                                                             (uint8_t)i);
1721                         E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1722                         E1000_WRITE_FLUSH(hw);
1723                 }
1724         }
1725 }
1726
1727 /* remove the SYN filter */
1728 static void
1729 igb_clear_syn_filter(struct rte_eth_dev *dev)
1730 {
1731         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1732         struct e1000_filter_info *filter_info =
1733                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1734
1735         if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1736                 filter_info->syn_info = 0;
1737                 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1738                 E1000_WRITE_FLUSH(hw);
1739         }
1740 }
1741
1742 /* remove all the flex filters */
1743 static void
1744 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1745 {
1746         struct e1000_filter_info *filter_info =
1747                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1748         struct e1000_flex_filter *flex_filter;
1749
1750         while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1751                 igb_remove_flex_filter(dev, flex_filter);
1752 }
1753
1754 /* remove the rss filter */
1755 static void
1756 igb_clear_rss_filter(struct rte_eth_dev *dev)
1757 {
1758         struct e1000_filter_info *filter =
1759                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1760
1761         if (filter->rss_info.num)
1762                 igb_config_rss_filter(dev, &filter->rss_info, FALSE);
1763 }
1764
1765 void
1766 igb_filterlist_flush(struct rte_eth_dev *dev)
1767 {
1768         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1769         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1770         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1771         struct igb_flex_filter_ele *flex_filter_ptr;
1772         struct igb_rss_conf_ele *rss_filter_ptr;
1773         struct igb_flow_mem *igb_flow_mem_ptr;
1774         enum rte_filter_type filter_type;
1775         struct rte_flow *pmd_flow;
1776
1777         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1778                 if (igb_flow_mem_ptr->dev == dev) {
1779                         pmd_flow = igb_flow_mem_ptr->flow;
1780                         filter_type = pmd_flow->filter_type;
1781
1782                         switch (filter_type) {
1783                         case RTE_ETH_FILTER_NTUPLE:
1784                                 ntuple_filter_ptr =
1785                                 (struct igb_ntuple_filter_ele *)
1786                                         pmd_flow->rule;
1787                                 TAILQ_REMOVE(&igb_filter_ntuple_list,
1788                                                 ntuple_filter_ptr, entries);
1789                                 rte_free(ntuple_filter_ptr);
1790                                 break;
1791                         case RTE_ETH_FILTER_ETHERTYPE:
1792                                 ethertype_filter_ptr =
1793                                 (struct igb_ethertype_filter_ele *)
1794                                         pmd_flow->rule;
1795                                 TAILQ_REMOVE(&igb_filter_ethertype_list,
1796                                                 ethertype_filter_ptr, entries);
1797                                 rte_free(ethertype_filter_ptr);
1798                                 break;
1799                         case RTE_ETH_FILTER_SYN:
1800                                 syn_filter_ptr =
1801                                         (struct igb_eth_syn_filter_ele *)
1802                                                 pmd_flow->rule;
1803                                 TAILQ_REMOVE(&igb_filter_syn_list,
1804                                                 syn_filter_ptr, entries);
1805                                 rte_free(syn_filter_ptr);
1806                                 break;
1807                         case RTE_ETH_FILTER_FLEXIBLE:
1808                                 flex_filter_ptr =
1809                                         (struct igb_flex_filter_ele *)
1810                                                 pmd_flow->rule;
1811                                 TAILQ_REMOVE(&igb_filter_flex_list,
1812                                                 flex_filter_ptr, entries);
1813                                 rte_free(flex_filter_ptr);
1814                                 break;
1815                         case RTE_ETH_FILTER_HASH:
1816                                 rss_filter_ptr =
1817                                         (struct igb_rss_conf_ele *)
1818                                                 pmd_flow->rule;
1819                                 TAILQ_REMOVE(&igb_filter_rss_list,
1820                                                 rss_filter_ptr, entries);
1821                                 rte_free(rss_filter_ptr);
1822                                 break;
1823                         default:
1824                                 PMD_DRV_LOG(WARNING, "Filter type"
1825                                         "(%d) not supported", filter_type);
1826                                 break;
1827                         }
1828                         TAILQ_REMOVE(&igb_flow_list,
1829                                  igb_flow_mem_ptr,
1830                                  entries);
1831                         rte_free(igb_flow_mem_ptr->flow);
1832                         rte_free(igb_flow_mem_ptr);
1833                 }
1834         }
1835 }
1836
1837 /*  Destroy all flow rules associated with a port on igb. */
1838 static int
1839 igb_flow_flush(struct rte_eth_dev *dev,
1840                 __rte_unused struct rte_flow_error *error)
1841 {
1842         igb_clear_all_ntuple_filter(dev);
1843         igb_clear_all_ethertype_filter(dev);
1844         igb_clear_syn_filter(dev);
1845         igb_clear_all_flex_filter(dev);
1846         igb_clear_rss_filter(dev);
1847         igb_filterlist_flush(dev);
1848
1849         return 0;
1850 }
1851
1852 const struct rte_flow_ops igb_flow_ops = {
1853         .validate = igb_flow_validate,
1854         .create = igb_flow_create,
1855         .destroy = igb_flow_destroy,
1856         .flush = igb_flow_flush,
1857 };
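
/*
 * This table backs the generic rte_flow API (rte_flow_validate/create/
 * destroy/flush) for igb ports; it is expected to be handed to ethdev by
 * the driver's filter_ctrl handler in e1000_ethdev.c when an application
 * queries RTE_ETH_FILTER_GENERIC.
 */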