/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>

#include <rte_common.h>
#include <rte_interrupts.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_ethdev_pci.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_malloc.h>
#include <rte_dev.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "e1000_logs.h"
#include "base/e1000_api.h"
#include "e1000_ethdev.h"

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
        do {                                                    \
                item = (pattern) + (index);                     \
                while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
                        (index)++;                              \
                        item = (pattern) + (index);             \
                }                                               \
        } while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
        do {                                                    \
                act = (actions) + (index);                      \
                while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
                        (index)++;                              \
                        act = (actions) + (index);              \
                }                                               \
        } while (0)

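/*
 * The two helpers above advance "index" past any VOID entries so the
 * parsers below only ever see meaningful items/actions. A sketch of a
 * pattern they would walk (hypothetical application-side list):
 *
 *      { RTE_FLOW_ITEM_TYPE_VOID },  // skipped by NEXT_ITEM_OF_PATTERN
 *      { RTE_FLOW_ITEM_TYPE_ETH },   // first item the parser sees
 *      { RTE_FLOW_ITEM_TYPE_END },
 */
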
#define IGB_FLEX_RAW_NUM        12

/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network order.
 */

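#ifdef IGB_FLOW_EXAMPLES /* hypothetical guard, never defined; sketch only */
/*
 * Byte-order sketch (hypothetical, unused): values in pattern items are
 * big endian, while rte_flow_attr fields stay in CPU order, as noted in
 * the comment above.
 */
static void
igb_flow_example_byte_order(void)
{
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.dst_port = rte_cpu_to_be_16(80), /* network order */
        };
        struct rte_flow_attr attr = {
                .ingress = 1,
                .priority = 1, /* CPU order */
        };

        RTE_SET_USED(tcp_spec);
        RTE_SET_USED(attr);
}
#endif
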
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item can be MAC or IPv4 */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        /* get the IPv4 info */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        ipv4_mask = item->mask;
        /**
         * Only support src & dst addresses, protocol,
         * others should be masked.
         */
        if (ipv4_mask->hdr.version_ihl ||
                ipv4_mask->hdr.type_of_service ||
                ipv4_mask->hdr.total_length ||
                ipv4_mask->hdr.packet_id ||
                ipv4_mask->hdr.fragment_offset ||
                ipv4_mask->hdr.time_to_live ||
                ipv4_mask->hdr.hdr_checksum) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
        filter->src_ip_mask = ipv4_mask->hdr.src_addr;
        filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

        ipv4_spec = item->spec;
        filter->dst_ip = ipv4_spec->hdr.dst_addr;
        filter->src_ip = ipv4_spec->hdr.src_addr;
        filter->proto  = ipv4_spec->hdr.next_proto_id;

        /* check if the next not void item is TCP or UDP or SCTP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* get the TCP/UDP/SCTP info */
        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                if (item->spec && item->mask) {
                        tcp_mask = item->mask;

                        /**
                         * Only support src & dst ports, tcp flags,
                         * others should be masked.
                         */
                        if (tcp_mask->hdr.sent_seq ||
                                tcp_mask->hdr.recv_ack ||
                                tcp_mask->hdr.data_off ||
                                tcp_mask->hdr.rx_win ||
                                tcp_mask->hdr.cksum ||
                                tcp_mask->hdr.tcp_urp) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                        filter->src_port_mask  = tcp_mask->hdr.src_port;
                        if (tcp_mask->hdr.tcp_flags == 0xFF) {
                                filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else if (!tcp_mask->hdr.tcp_flags) {
                                filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                        } else {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        tcp_spec = item->spec;
                        filter->dst_port  = tcp_spec->hdr.dst_port;
                        filter->src_port  = tcp_spec->hdr.src_port;
                        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
                }
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                if (item->spec && item->mask) {
                        udp_mask = item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = udp_mask->hdr.dst_port;
                        filter->src_port_mask = udp_mask->hdr.src_port;

                        udp_spec = item->spec;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        filter->src_port = udp_spec->hdr.src_port;
                }
        } else {
                if (item->spec && item->mask) {
                        sctp_mask = item->mask;

                        /**
                         * Only support src & dst ports,
                         * others should be masked.
                         */
                        if (sctp_mask->hdr.tag ||
                            sctp_mask->hdr.cksum) {
                                memset(filter, 0,
                                        sizeof(struct rte_eth_ntuple_filter));
                                rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by ntuple filter");
                                return -rte_errno;
                        }

                        filter->dst_port_mask = sctp_mask->hdr.dst_port;
                        filter->src_port_mask = sctp_mask->hdr.src_port;

                        sctp_spec = item->spec;
                        filter->dst_port = sctp_spec->hdr.dst_port;
                        filter->src_port = sctp_spec->hdr.src_port;
                }
        }
        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;

        return 0;
}
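
#ifdef IGB_FLOW_EXAMPLES /* hypothetical guard, never defined; sketch only */
/*
 * Sketch (hypothetical, unused) of a rule the parser above accepts:
 * ETH (empty) / IPV4 with fully-masked addresses and protocol / UDP with
 * fully-masked ports / END, plus a single QUEUE action. All values are
 * illustrative.
 */
static int
igb_flow_example_ntuple(struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        struct rte_flow_item_ipv4 ipv4_spec = {
                .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
                .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
                .hdr.next_proto_id = 17, /* UDP */
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.src_addr = UINT32_MAX,
                .hdr.dst_addr = UINT32_MAX,
                .hdr.next_proto_id = UINT8_MAX,
        };
        struct rte_flow_item_udp udp_spec = {
                .hdr.src_port = rte_cpu_to_be_16(80),
                .hdr.dst_port = rte_cpu_to_be_16(80),
        };
        struct rte_flow_item_udp udp_mask = {
                .hdr.src_port = UINT16_MAX,
                .hdr.dst_port = UINT16_MAX,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4,
                  .spec = &ipv4_spec, .mask = &ipv4_mask },
                { .type = RTE_FLOW_ITEM_TYPE_UDP,
                  .spec = &udp_spec, .mask = &udp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };

        return cons_parse_ntuple_filter(&attr, pattern, actions,
                                        filter, error);
}
#endif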

/* A specific function for igb because the filter flags are specific. */
static int
igb_parse_ntuple_filter(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_eth_ntuple_filter *filter,
                        struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
        if (ret)
                return ret;

        /* Igb doesn't support many priorities. */
        if (filter->priority > E1000_2TUPLE_MAX_PRI) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_5TUPLE_FLAGS;
        } else {
                if (filter->src_ip_mask || filter->dst_ip_mask ||
                        filter->src_port_mask) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "only the 2-tuple is "
                                "supported by this filter");
                        return -rte_errno;
                }
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                "supported by ntuple filter");
                        return -rte_errno;
                }
                filter->flags |= RTE_2TUPLE_FLAGS;
        }

        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* Parse pattern */
        index = 0;

        /* The first non-void item should be MAC. */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = item->spec;
        eth_mask = item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!is_zero_ether_addr(&eth_mask->src) ||
            (!is_zero_ether_addr(&eth_mask->dst) &&
             !is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        index = 0;
        /* Check if the first non-void action is QUEUE or DROP. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}
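
#ifdef IGB_FLOW_EXAMPLES /* hypothetical guard, never defined; sketch only */
/*
 * Sketch (hypothetical, unused) of an ethertype rule the parser above
 * accepts: one ETH item whose type is fully masked, then a QUEUE action.
 * The ethertype value is illustrative.
 */
static int
igb_flow_example_ethertype(struct rte_eth_ethertype_filter *filter,
                           struct rte_flow_error *error)
{
        struct rte_flow_item_eth eth_spec = {
                .type = rte_cpu_to_be_16(0x0807), /* example ethertype */
        };
        struct rte_flow_item_eth eth_mask = {
                .type = UINT16_MAX, /* ethertype must be fully masked */
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH,
                  .spec = &eth_spec, .mask = &eth_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };

        return cons_parse_ethertype_filter(&attr, pattern, actions,
                                           filter, error);
}
#endif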

static int
igb_parse_ethertype_filter(struct rte_eth_dev *dev,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item pattern[],
                           const struct rte_flow_action actions[],
                           struct rte_eth_ethertype_filter *filter,
                           struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);
        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(
                                        struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(
                                        struct rte_eth_ethertype_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not supported "
                                        "by ethertype filter");
                        return -rte_errno;
                }
        }

        if (filter->ether_type == ETHER_TYPE_IPv4 ||
                filter->ether_type == ETHER_TYPE_IPv6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item can be ETH, IPV4, IPV6 or TCP.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_syn_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

        /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
            item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                /* if the item is MAC, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN address mask");
                        return -rte_errno;
                }

                /* check if the next not void item is IPv4 or IPv6 */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Skip IP */
        if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
            item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
                /* if the item is IP, the content should be NULL */
                if (item->spec || item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                        return -rte_errno;
                }

                /* check if the next not void item is TCP */
                index++;
                NEXT_ITEM_OF_PATTERN(item, pattern, index);
                if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                        return -rte_errno;
                }
        }

        /* Get the TCP info. Only support SYN. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid SYN mask");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        tcp_spec = item->spec;
        tcp_mask = item->mask;
        if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
            tcp_mask->hdr.src_port ||
            tcp_mask->hdr.dst_port ||
            tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by syn filter");
                return -rte_errno;
        }

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        /* Support 2 priorities, the lowest or highest. */
        if (!attr->priority) {
                filter->hig_pri = 0;
        } else if (attr->priority == (uint32_t)~0U) {
                filter->hig_pri = 1;
        } else {
                memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        attr, "Not support priority.");
                return -rte_errno;
        }

        return 0;
}
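
#ifdef IGB_FLOW_EXAMPLES /* hypothetical guard, never defined; sketch only */
/*
 * Sketch (hypothetical, unused) of a TCP SYN rule the parser above
 * accepts: empty ETH and IPV4 items, then a TCP item matching only the
 * SYN flag. Note the mask must be exactly TCP_SYN_FLAG, per the checks
 * above.
 */
static int
igb_flow_example_syn(struct rte_eth_syn_filter *filter,
                     struct rte_flow_error *error)
{
        struct rte_flow_item_tcp tcp_spec = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        struct rte_flow_item_tcp tcp_mask = {
                .hdr.tcp_flags = TCP_SYN_FLAG,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_TCP,
                  .spec = &tcp_spec, .mask = &tcp_mask },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 }; /* priority 0: low */

        return cons_parse_syn_filter(&attr, pattern, actions,
                                     filter, error);
}
#endif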

static int
igb_parse_syn_filter(struct rte_eth_dev *dev,
                     const struct rte_flow_attr *attr,
                     const struct rte_flow_item pattern[],
                     const struct rte_flow_action actions[],
                     struct rte_eth_syn_filter *filter,
                     struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP(hw->mac.type);

        ret = cons_parse_syn_filter(attr, pattern,
                                        actions, filter, error);
        /* check the parse result before looking at the filter fields */
        if (ret)
                return ret;

        if (hw->mac.type == e1000_82576) {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        } else {
                if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                NULL, "queue number not "
                                        "supported by syn filter");
                        return -rte_errno;
                }
        }

        return 0;
}

/**
 * Parse the rule to see if it is a flex byte rule.
 * And get the flex byte filter info as well.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                            Mask
 * RAW          relative        0               0x1
 *              offset          0               0xFFFFFFFF
 *              pattern         {0x08, 0x06}    {0xFF, 0xFF}
 * RAW          relative        1               0x1
 *              offset          100             0xFFFFFFFF
 *              pattern         {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_flex_filter(const struct rte_flow_attr *attr,
                                const struct rte_flow_item pattern[],
                                const struct rte_flow_action actions[],
                                struct rte_eth_flex_filter *filter,
                                struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_raw *raw_spec;
        const struct rte_flow_item_raw *raw_mask;
        const struct rte_flow_action_queue *act_q;
        uint32_t index, i, offset, total_offset;
        uint32_t max_offset = 0;
        int32_t shift, j, raw_index = 0;
        int32_t relative[IGB_FLEX_RAW_NUM] = {0};
        int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        /* parse pattern */
        index = 0;

item_loop:

        /* the first not void item should be RAW */
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* a RAW item must carry both a spec and a mask */
        if (!item->spec || !item->mask) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        raw_spec = item->spec;
        raw_mask = item->mask;

        if (!raw_mask->length ||
            !raw_mask->relative) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_mask->offset)
                offset = raw_spec->offset;
        else
                offset = 0;

        for (j = 0; j < raw_spec->length; j++) {
                if (raw_mask->pattern[j] != 0xFF) {
                        memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                        rte_flow_error_set(error, EINVAL,
                                        RTE_FLOW_ERROR_TYPE_ITEM,
                                        item, "Not supported by flex filter");
                        return -rte_errno;
                }
        }

        total_offset = 0;

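        /*
         * A relative RAW item is anchored at the end of the previous
         * item: walk back through earlier items, summing their end
         * offsets, until an absolute (non-relative) one is reached.
         */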
        if (raw_spec->relative) {
                for (j = raw_index; j > 0; j--) {
                        total_offset += raw_offset[j - 1];
                        if (!relative[j - 1])
                                break;
                }
                if (total_offset + raw_spec->length + offset > max_offset)
                        max_offset = total_offset + raw_spec->length + offset;
        } else {
                if (raw_spec->length + offset > max_offset)
                        max_offset = raw_spec->length + offset;
        }

        if ((raw_spec->length + offset + total_offset) >
                        RTE_FLEX_FILTER_MAXLEN) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        if (raw_spec->relative == 0) {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[offset + j] =
                                raw_spec->pattern[j];
                j = offset / CHAR_BIT;
                shift = offset % CHAR_BIT;
        } else {
                for (j = 0; j < raw_spec->length; j++)
                        filter->bytes[total_offset + offset + j] =
                                raw_spec->pattern[j];
                j = (total_offset + offset) / CHAR_BIT;
                shift = (total_offset + offset) % CHAR_BIT;
        }

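        /*
         * Each bit of filter->mask enables one byte of filter->bytes,
         * MSB first: set one bit per pattern byte, moving on to the next
         * mask byte every CHAR_BIT bits.
         */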
        i = 0;

        for ( ; shift < CHAR_BIT; shift++) {
                filter->mask[j] |= (0x80 >> shift);
                i++;
                if (i == raw_spec->length)
                        break;
                if (shift == (CHAR_BIT - 1)) {
                        j++;
                        shift = -1;
                }
        }

        relative[raw_index] = raw_spec->relative;
        raw_offset[raw_index] = offset + raw_spec->length;
        raw_index++;

        /* check if the next not void item is RAW */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
                item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by flex filter");
                return -rte_errno;
        }

        /* go back to parser */
        if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
                /* if the item is RAW, its content should be parsed */
                goto item_loop;
        }

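        /*
         * The hardware compares flex filters in 8-byte units (see the
         * length checks in igb_parse_flex_filter below), so round the
         * total match length up to the next multiple of 8.
         */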
        filter->len = RTE_ALIGN(max_offset, 8);

        /* parse action */
        index = 0;

        /* check if the first not void action is QUEUE. */
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;

        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                        attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                        attr, "Not support egress.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }

        filter->priority = (uint16_t)attr->priority;

        return 0;
}
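
#ifdef IGB_FLOW_EXAMPLES /* hypothetical guard, never defined; sketch only */
/*
 * Sketch (hypothetical, unused) of a flex-byte rule the parser above
 * accepts: two RAW items, the second relative to the first, each with a
 * fully-masked pattern. All values are illustrative.
 */
static int
igb_flow_example_flex(struct rte_eth_flex_filter *filter,
                      struct rte_flow_error *error)
{
        uint8_t spec0[] = { 0x08, 0x06 };
        uint8_t mask0[] = { 0xFF, 0xFF };
        uint8_t spec1[] = { 0x11, 0x22, 0x33 };
        uint8_t mask1[] = { 0xFF, 0xFF, 0xFF };
        struct rte_flow_item_raw raw_spec0 = {
                .relative = 0,
                .offset = 0,
                .length = RTE_DIM(spec0),
                .pattern = spec0,
        };
        struct rte_flow_item_raw raw_mask0 = {
                .relative = 1,        /* "relative" field is meaningful */
                .offset = -1,         /* all bits set: "offset" is meaningful */
                .length = UINT16_MAX, /* "length" field is meaningful */
                .pattern = mask0,
        };
        struct rte_flow_item_raw raw_spec1 = {
                .relative = 1,
                .offset = 10,
                .length = RTE_DIM(spec1),
                .pattern = spec1,
        };
        struct rte_flow_item_raw raw_mask1 = {
                .relative = 1,
                .offset = -1,
                .length = UINT16_MAX,
                .pattern = mask1,
        };
        struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_RAW,
                  .spec = &raw_spec0, .mask = &raw_mask0 },
                { .type = RTE_FLOW_ITEM_TYPE_RAW,
                  .spec = &raw_spec1, .mask = &raw_mask1 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };

        return cons_parse_flex_filter(&attr, pattern, actions,
                                      filter, error);
}
#endif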

static int
igb_parse_flex_filter(struct rte_eth_dev *dev,
                      const struct rte_flow_attr *attr,
                      const struct rte_flow_item pattern[],
                      const struct rte_flow_action actions[],
                      struct rte_eth_flex_filter *filter,
                      struct rte_flow_error *error)
{
        struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
        int ret;

        MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

        ret = cons_parse_flex_filter(attr, pattern,
                                        actions, filter, error);
        /* check the parse result before looking at the filter fields */
        if (ret)
                return ret;

        if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
                memset(filter, 0, sizeof(struct rte_eth_flex_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue number not supported by flex filter");
                return -rte_errno;
        }

        if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
                filter->len % sizeof(uint64_t) != 0) {
                PMD_DRV_LOG(ERR, "filter's length is out of range");
                return -EINVAL;
        }

        if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
                PMD_DRV_LOG(ERR, "filter's priority is out of range");
                return -EINVAL;
        }

        return 0;
}
1267
1268 static int
1269 igb_parse_rss_filter(struct rte_eth_dev *dev,
1270                         const struct rte_flow_attr *attr,
1271                         const struct rte_flow_action actions[],
1272                         struct igb_rte_flow_rss_conf *rss_conf,
1273                         struct rte_flow_error *error)
1274 {
1275         const struct rte_flow_action *act;
1276         const struct rte_flow_action_rss *rss;
1277         uint16_t n, index;
1278
1279         /**
1280          * rss only supports forwarding,
1281          * check if the first not void action is RSS.
1282          */
1283         index = 0;
1284         NEXT_ITEM_OF_ACTION(act, actions, index);
1285         if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
1286                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1287                 rte_flow_error_set(error, EINVAL,
1288                         RTE_FLOW_ERROR_TYPE_ACTION,
1289                         act, "Not supported action.");
1290                 return -rte_errno;
1291         }
1292
1293         rss = (const struct rte_flow_action_rss *)act->conf;
1294
1295         if (!rss || !rss->queue_num) {
1296                 rte_flow_error_set(error, EINVAL,
1297                                 RTE_FLOW_ERROR_TYPE_ACTION,
1298                                 act,
1299                            "no valid queues");
1300                 return -rte_errno;
1301         }
1302
1303         for (n = 0; n < rss->queue_num; n++) {
1304                 if (rss->queue[n] >= dev->data->nb_rx_queues) {
1305                         rte_flow_error_set(error, EINVAL,
1306                                    RTE_FLOW_ERROR_TYPE_ACTION,
1307                                    act,
1308                                    "queue id > max number of queues");
1309                         return -rte_errno;
1310                 }
1311         }
1312
1313         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
1314                 return rte_flow_error_set
1315                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1316                          "non-default RSS hash functions are not supported");
1317         if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
1318                 return rte_flow_error_set
1319                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1320                          "RSS hash key must be exactly 40 bytes");
1321         if (rss->queue_num > RTE_DIM(rss_conf->queue))
1322                 return rte_flow_error_set
1323                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
1324                          "too many queues for RSS context");
1325         if (igb_rss_conf_init(rss_conf, rss))
1326                 return rte_flow_error_set
1327                         (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
1328                          "RSS context initialization failure");
1329
        /* check if the next not void action is END */
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1335                 rte_flow_error_set(error, EINVAL,
1336                         RTE_FLOW_ERROR_TYPE_ACTION,
1337                         act, "Not supported action.");
1338                 return -rte_errno;
1339         }
1340
1341         /* parse attr */
1342         /* must be input direction */
1343         if (!attr->ingress) {
1344                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1345                 rte_flow_error_set(error, EINVAL,
1346                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
1348                 return -rte_errno;
1349         }
1350
1351         /* not supported */
1352         if (attr->egress) {
1353                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1354                 rte_flow_error_set(error, EINVAL,
1355                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Egress is not supported.");
1357                 return -rte_errno;
1358         }
1359
1360         if (attr->priority > 0xFFFF) {
1361                 memset(rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1362                 rte_flow_error_set(error, EINVAL,
1363                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Priority is out of range.");
1365                 return -rte_errno;
1366         }
1367
1368         return 0;
1369 }
1370
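/*
 * Illustrative sketch, not driver code: an application-side action list
 * that satisfies igb_parse_rss_filter() above. The queue ids and RSS types
 * are assumptions; the constraints (default hash function only, a key of
 * exactly RTE_DIM(rss_conf->key) bytes when one is supplied, queue ids
 * below the configured Rx queue count) come from the checks above.
 *
 *	static const uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,	// only DEFAULT passes
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,		// 0 keeps the current hash key
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
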
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one kind of filter.
 * We let it use the first filter it matches,
 * so the order of the parsers below matters.
 */
1377 static struct rte_flow *
1378 igb_flow_create(struct rte_eth_dev *dev,
1379                   const struct rte_flow_attr *attr,
1380                   const struct rte_flow_item pattern[],
1381                   const struct rte_flow_action actions[],
1382                   struct rte_flow_error *error)
1383 {
1384         int ret;
1385         struct rte_eth_ntuple_filter ntuple_filter;
1386         struct rte_eth_ethertype_filter ethertype_filter;
1387         struct rte_eth_syn_filter syn_filter;
1388         struct rte_eth_flex_filter flex_filter;
1389         struct igb_rte_flow_rss_conf rss_conf;
1390         struct rte_flow *flow = NULL;
1391         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1392         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1393         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1394         struct igb_flex_filter_ele *flex_filter_ptr;
1395         struct igb_rss_conf_ele *rss_filter_ptr;
1396         struct igb_flow_mem *igb_flow_mem_ptr;
1397
1398         flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1399         if (!flow) {
1400                 PMD_DRV_LOG(ERR, "failed to allocate memory");
                return NULL;
1402         }
1403         igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1404                         sizeof(struct igb_flow_mem), 0);
1405         if (!igb_flow_mem_ptr) {
1406                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1407                 rte_free(flow);
1408                 return NULL;
1409         }
1410         igb_flow_mem_ptr->flow = flow;
1411         igb_flow_mem_ptr->dev = dev;
1412         TAILQ_INSERT_TAIL(&igb_flow_list,
1413                                 igb_flow_mem_ptr, entries);
1414
1415         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1416         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1417                         actions, &ntuple_filter, error);
1418         if (!ret) {
1419                 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1420                 if (!ret) {
1421                         ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1422                                 sizeof(struct igb_ntuple_filter_ele), 0);
1423                         if (!ntuple_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
1426                         }
1427
1428                         rte_memcpy(&ntuple_filter_ptr->filter_info,
1429                                 &ntuple_filter,
1430                                 sizeof(struct rte_eth_ntuple_filter));
1431                         TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1432                                 ntuple_filter_ptr, entries);
1433                         flow->rule = ntuple_filter_ptr;
1434                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1435                         return flow;
1436                 }
1437                 goto out;
1438         }
1439
1440         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1441         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1442                                 actions, &ethertype_filter, error);
1443         if (!ret) {
1444                 ret = igb_add_del_ethertype_filter(dev,
1445                                 &ethertype_filter, TRUE);
1446                 if (!ret) {
1447                         ethertype_filter_ptr = rte_zmalloc(
1448                                 "igb_ethertype_filter",
1449                                 sizeof(struct igb_ethertype_filter_ele), 0);
1450                         if (!ethertype_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
1453                         }
1454
1455                         rte_memcpy(&ethertype_filter_ptr->filter_info,
1456                                 &ethertype_filter,
1457                                 sizeof(struct rte_eth_ethertype_filter));
1458                         TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1459                                 ethertype_filter_ptr, entries);
1460                         flow->rule = ethertype_filter_ptr;
1461                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1462                         return flow;
1463                 }
1464                 goto out;
1465         }
1466
1467         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1468         ret = igb_parse_syn_filter(dev, attr, pattern,
1469                                 actions, &syn_filter, error);
1470         if (!ret) {
1471                 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1472                 if (!ret) {
1473                         syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1474                                 sizeof(struct igb_eth_syn_filter_ele), 0);
1475                         if (!syn_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
1478                         }
1479
1480                         rte_memcpy(&syn_filter_ptr->filter_info,
1481                                 &syn_filter,
1482                                 sizeof(struct rte_eth_syn_filter));
1483                         TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1484                                 syn_filter_ptr,
1485                                 entries);
1486                         flow->rule = syn_filter_ptr;
1487                         flow->filter_type = RTE_ETH_FILTER_SYN;
1488                         return flow;
1489                 }
1490                 goto out;
1491         }
1492
1493         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1494         ret = igb_parse_flex_filter(dev, attr, pattern,
1495                                         actions, &flex_filter, error);
1496         if (!ret) {
1497                 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1498                 if (!ret) {
1499                         flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1500                                 sizeof(struct igb_flex_filter_ele), 0);
1501                         if (!flex_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
1504                         }
1505
1506                         rte_memcpy(&flex_filter_ptr->filter_info,
1507                                 &flex_filter,
1508                                 sizeof(struct rte_eth_flex_filter));
1509                         TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1510                                 flex_filter_ptr, entries);
1511                         flow->rule = flex_filter_ptr;
1512                         flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1513                         return flow;
                }
                goto out;
        }
1516
1517         memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1518         ret = igb_parse_rss_filter(dev, attr,
1519                                         actions, &rss_conf, error);
1520         if (!ret) {
1521                 ret = igb_config_rss_filter(dev, &rss_conf, TRUE);
1522                 if (!ret) {
1523                         rss_filter_ptr = rte_zmalloc("igb_rss_filter",
1524                                 sizeof(struct igb_rss_conf_ele), 0);
1525                         if (!rss_filter_ptr) {
                                PMD_DRV_LOG(ERR, "failed to allocate memory");
                                ret = -ENOMEM;
                                goto out;
1528                         }
1529                         igb_rss_conf_init(&rss_filter_ptr->filter_info,
1530                                           &rss_conf.conf);
1531                         TAILQ_INSERT_TAIL(&igb_filter_rss_list,
1532                                 rss_filter_ptr, entries);
1533                         flow->rule = rss_filter_ptr;
1534                         flow->filter_type = RTE_ETH_FILTER_HASH;
1535                         return flow;
1536                 }
1537         }
1538
1539 out:
1540         TAILQ_REMOVE(&igb_flow_list,
1541                 igb_flow_mem_ptr, entries);
1542         rte_flow_error_set(error, -ret,
1543                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1544                            "Failed to create flow.");
1545         rte_free(igb_flow_mem_ptr);
1546         rte_free(flow);
1547         return NULL;
1548 }
1549
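/*
 * Illustrative sketch, not driver code: the parser chain in
 * igb_flow_create() is reached through the generic flow API. port_id,
 * attr, pattern and actions are placeholders supplied by the application.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow creation failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */
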
/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
1555 static int
igb_flow_validate(struct rte_eth_dev *dev,
1557                 const struct rte_flow_attr *attr,
1558                 const struct rte_flow_item pattern[],
1559                 const struct rte_flow_action actions[],
1560                 struct rte_flow_error *error)
1561 {
1562         struct rte_eth_ntuple_filter ntuple_filter;
1563         struct rte_eth_ethertype_filter ethertype_filter;
1564         struct rte_eth_syn_filter syn_filter;
1565         struct rte_eth_flex_filter flex_filter;
1566         struct igb_rte_flow_rss_conf rss_conf;
1567         int ret;
1568
1569         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1570         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1571                                 actions, &ntuple_filter, error);
1572         if (!ret)
1573                 return 0;
1574
1575         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1576         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1577                                 actions, &ethertype_filter, error);
1578         if (!ret)
1579                 return 0;
1580
1581         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1582         ret = igb_parse_syn_filter(dev, attr, pattern,
1583                                 actions, &syn_filter, error);
1584         if (!ret)
1585                 return 0;
1586
1587         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1588         ret = igb_parse_flex_filter(dev, attr, pattern,
1589                                 actions, &flex_filter, error);
1590         if (!ret)
1591                 return 0;
1592
1593         memset(&rss_conf, 0, sizeof(struct igb_rte_flow_rss_conf));
1594         ret = igb_parse_rss_filter(dev, attr,
1595                                         actions, &rss_conf, error);
1596
1597         return ret;
1598 }
1599
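/*
 * Illustrative sketch, not driver code: a rule can be validated before the
 * application commits to creating it. A zero return only means the format
 * is accepted; creation may still fail for lack of hardware resources.
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */
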
1600 /* Destroy a flow rule on igb. */
1601 static int
1602 igb_flow_destroy(struct rte_eth_dev *dev,
1603                 struct rte_flow *flow,
1604                 struct rte_flow_error *error)
1605 {
1606         int ret;
1607         struct rte_flow *pmd_flow = flow;
1608         enum rte_filter_type filter_type = pmd_flow->filter_type;
1609         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1610         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1611         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1612         struct igb_flex_filter_ele *flex_filter_ptr;
1613         struct igb_flow_mem *igb_flow_mem_ptr;
1614         struct igb_rss_conf_ele *rss_filter_ptr;
1615
1616         switch (filter_type) {
1617         case RTE_ETH_FILTER_NTUPLE:
1618                 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1619                                         pmd_flow->rule;
1620                 ret = igb_add_del_ntuple_filter(dev,
1621                                 &ntuple_filter_ptr->filter_info, FALSE);
1622                 if (!ret) {
1623                         TAILQ_REMOVE(&igb_filter_ntuple_list,
1624                         ntuple_filter_ptr, entries);
1625                         rte_free(ntuple_filter_ptr);
1626                 }
1627                 break;
1628         case RTE_ETH_FILTER_ETHERTYPE:
1629                 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1630                                         pmd_flow->rule;
1631                 ret = igb_add_del_ethertype_filter(dev,
1632                                 &ethertype_filter_ptr->filter_info, FALSE);
1633                 if (!ret) {
1634                         TAILQ_REMOVE(&igb_filter_ethertype_list,
1635                                 ethertype_filter_ptr, entries);
1636                         rte_free(ethertype_filter_ptr);
1637                 }
1638                 break;
1639         case RTE_ETH_FILTER_SYN:
1640                 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1641                                 pmd_flow->rule;
1642                 ret = eth_igb_syn_filter_set(dev,
1643                                 &syn_filter_ptr->filter_info, FALSE);
1644                 if (!ret) {
1645                         TAILQ_REMOVE(&igb_filter_syn_list,
1646                                 syn_filter_ptr, entries);
1647                         rte_free(syn_filter_ptr);
1648                 }
1649                 break;
1650         case RTE_ETH_FILTER_FLEXIBLE:
1651                 flex_filter_ptr = (struct igb_flex_filter_ele *)
1652                                 pmd_flow->rule;
1653                 ret = eth_igb_add_del_flex_filter(dev,
1654                                 &flex_filter_ptr->filter_info, FALSE);
1655                 if (!ret) {
1656                         TAILQ_REMOVE(&igb_filter_flex_list,
1657                                 flex_filter_ptr, entries);
1658                         rte_free(flex_filter_ptr);
1659                 }
1660                 break;
1661         case RTE_ETH_FILTER_HASH:
1662                 rss_filter_ptr = (struct igb_rss_conf_ele *)
1663                                 pmd_flow->rule;
1664                 ret = igb_config_rss_filter(dev,
1665                                         &rss_filter_ptr->filter_info, FALSE);
1666                 if (!ret) {
1667                         TAILQ_REMOVE(&igb_filter_rss_list,
1668                                 rss_filter_ptr, entries);
1669                         rte_free(rss_filter_ptr);
1670                 }
1671                 break;
1672         default:
1673                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1674                             filter_type);
1675                 ret = -EINVAL;
1676                 break;
1677         }
1678
1679         if (ret) {
1680                 rte_flow_error_set(error, EINVAL,
1681                                 RTE_FLOW_ERROR_TYPE_HANDLE,
1682                                 NULL, "Failed to destroy flow");
1683                 return ret;
1684         }
1685
1686         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1687                 if (igb_flow_mem_ptr->flow == pmd_flow) {
1688                         TAILQ_REMOVE(&igb_flow_list,
1689                                 igb_flow_mem_ptr, entries);
                        rte_free(igb_flow_mem_ptr);
                        /* each flow is linked once; stop before the
                         * iterator touches the freed node.
                         */
                        break;
                }
1692         }
1693         rte_free(flow);
1694
1695         return ret;
1696 }
1697
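/*
 * Illustrative sketch, not driver code: tearing one rule down from the
 * application ends up in igb_flow_destroy() above; flow is the handle
 * returned by rte_flow_create() and port_id is a placeholder.
 *
 *	if (rte_flow_destroy(port_id, flow, &err) != 0)
 *		printf("destroy failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */
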
1698 /* remove all the n-tuple filters */
1699 static void
1700 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1701 {
1702         struct e1000_filter_info *filter_info =
1703                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1704         struct e1000_5tuple_filter *p_5tuple;
1705         struct e1000_2tuple_filter *p_2tuple;
1706
1707         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1708                 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1709
1710         while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1711                 igb_delete_2tuple_filter(dev, p_2tuple);
1712 }
1713
1714 /* remove all the ether type filters */
1715 static void
1716 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1717 {
1718         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1719         struct e1000_filter_info *filter_info =
1720                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1721         int i;
1722
1723         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1724                 if (filter_info->ethertype_mask & (1 << i)) {
1725                         (void)igb_ethertype_filter_remove(filter_info,
1726                                                             (uint8_t)i);
1727                         E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1728                         E1000_WRITE_FLUSH(hw);
1729                 }
1730         }
1731 }
1732
1733 /* remove the SYN filter */
1734 static void
1735 igb_clear_syn_filter(struct rte_eth_dev *dev)
1736 {
1737         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1738         struct e1000_filter_info *filter_info =
1739                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1740
1741         if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1742                 filter_info->syn_info = 0;
1743                 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1744                 E1000_WRITE_FLUSH(hw);
1745         }
1746 }
1747
1748 /* remove all the flex filters */
1749 static void
1750 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1751 {
1752         struct e1000_filter_info *filter_info =
1753                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1754         struct e1000_flex_filter *flex_filter;
1755
1756         while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1757                 igb_remove_flex_filter(dev, flex_filter);
1758 }
1759
1760 /* remove the rss filter */
1761 static void
1762 igb_clear_rss_filter(struct rte_eth_dev *dev)
1763 {
1764         struct e1000_filter_info *filter =
1765                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1766
1767         if (filter->rss_info.conf.queue_num)
1768                 igb_config_rss_filter(dev, &filter->rss_info, FALSE);
1769 }
1770
1771 void
1772 igb_filterlist_flush(struct rte_eth_dev *dev)
1773 {
1774         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1775         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1776         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1777         struct igb_flex_filter_ele *flex_filter_ptr;
        struct igb_rss_conf_ele *rss_filter_ptr;
        struct igb_flow_mem *igb_flow_mem_ptr;
        struct igb_flow_mem *next_ptr;
1780         enum rte_filter_type filter_type;
1781         struct rte_flow *pmd_flow;
1782
        /* walk the list safely: the matched node is freed in the loop body */
        igb_flow_mem_ptr = TAILQ_FIRST(&igb_flow_list);
        while (igb_flow_mem_ptr != NULL) {
                next_ptr = TAILQ_NEXT(igb_flow_mem_ptr, entries);
                if (igb_flow_mem_ptr->dev == dev) {
1785                         pmd_flow = igb_flow_mem_ptr->flow;
1786                         filter_type = pmd_flow->filter_type;
1787
1788                         switch (filter_type) {
1789                         case RTE_ETH_FILTER_NTUPLE:
1790                                 ntuple_filter_ptr =
1791                                 (struct igb_ntuple_filter_ele *)
1792                                         pmd_flow->rule;
1793                                 TAILQ_REMOVE(&igb_filter_ntuple_list,
1794                                                 ntuple_filter_ptr, entries);
1795                                 rte_free(ntuple_filter_ptr);
1796                                 break;
1797                         case RTE_ETH_FILTER_ETHERTYPE:
1798                                 ethertype_filter_ptr =
1799                                 (struct igb_ethertype_filter_ele *)
1800                                         pmd_flow->rule;
1801                                 TAILQ_REMOVE(&igb_filter_ethertype_list,
1802                                                 ethertype_filter_ptr, entries);
1803                                 rte_free(ethertype_filter_ptr);
1804                                 break;
1805                         case RTE_ETH_FILTER_SYN:
1806                                 syn_filter_ptr =
1807                                         (struct igb_eth_syn_filter_ele *)
1808                                                 pmd_flow->rule;
1809                                 TAILQ_REMOVE(&igb_filter_syn_list,
1810                                                 syn_filter_ptr, entries);
1811                                 rte_free(syn_filter_ptr);
1812                                 break;
1813                         case RTE_ETH_FILTER_FLEXIBLE:
1814                                 flex_filter_ptr =
1815                                         (struct igb_flex_filter_ele *)
1816                                                 pmd_flow->rule;
1817                                 TAILQ_REMOVE(&igb_filter_flex_list,
1818                                                 flex_filter_ptr, entries);
1819                                 rte_free(flex_filter_ptr);
1820                                 break;
1821                         case RTE_ETH_FILTER_HASH:
1822                                 rss_filter_ptr =
1823                                         (struct igb_rss_conf_ele *)
1824                                                 pmd_flow->rule;
1825                                 TAILQ_REMOVE(&igb_filter_rss_list,
1826                                                 rss_filter_ptr, entries);
1827                                 rte_free(rss_filter_ptr);
1828                                 break;
1829                         default:
                                PMD_DRV_LOG(WARNING, "Filter type"
                                        " (%d) not supported", filter_type);
1832                                 break;
1833                         }
1834                         TAILQ_REMOVE(&igb_flow_list,
1835                                  igb_flow_mem_ptr,
1836                                  entries);
1837                         rte_free(igb_flow_mem_ptr->flow);
1838                         rte_free(igb_flow_mem_ptr);
                }
                igb_flow_mem_ptr = next_ptr;
        }
1841 }
1842
1843 /*  Destroy all flow rules associated with a port on igb. */
1844 static int
1845 igb_flow_flush(struct rte_eth_dev *dev,
1846                 __rte_unused struct rte_flow_error *error)
1847 {
1848         igb_clear_all_ntuple_filter(dev);
1849         igb_clear_all_ethertype_filter(dev);
1850         igb_clear_syn_filter(dev);
1851         igb_clear_all_flex_filter(dev);
1852         igb_clear_rss_filter(dev);
1853         igb_filterlist_flush(dev);
1854
1855         return 0;
1856 }
1857
1858 const struct rte_flow_ops igb_flow_ops = {
1859         .validate = igb_flow_validate,
1860         .create = igb_flow_create,
1861         .destroy = igb_flow_destroy,
1862         .flush = igb_flow_flush,
1863 };
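
/*
 * Illustrative sketch, not driver code: applications never use this table
 * directly. The ethdev layer resolves it through the driver's filter_ctrl
 * callback (eth_igb_filter_ctrl() in e1000_ethdev.c), along these lines:
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &igb_flow_ops;
 *		break;
 */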