70cafeaa0e1df8cc52cf1b2b498c5c7e128ef296
[dpdk.git] / drivers / net / e1000 / igb_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2016 Intel Corporation
3  */
4
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <errno.h>
8 #include <stdint.h>
9 #include <stdarg.h>
10
11 #include <rte_common.h>
12 #include <rte_interrupts.h>
13 #include <rte_byteorder.h>
14 #include <rte_log.h>
15 #include <rte_debug.h>
16 #include <rte_pci.h>
17 #include <rte_ether.h>
18 #include <rte_ethdev.h>
19 #include <rte_ethdev_pci.h>
20 #include <rte_memory.h>
21 #include <rte_eal.h>
22 #include <rte_atomic.h>
23 #include <rte_malloc.h>
24 #include <rte_dev.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27
28 #include "e1000_logs.h"
29 #include "base/e1000_api.h"
30 #include "e1000_ethdev.h"
31
/*
 * Point @item at (pattern)[index], then skip any RTE_FLOW_ITEM_TYPE_VOID
 * entries, advancing @index in place.  On exit @item references the first
 * non-VOID item at or after the given index.  Assumes the pattern array is
 * terminated by a non-VOID END item, per rte_flow convention.
 */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)              \
	do {                                                    \
		item = (pattern) + (index);                     \
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
		(index)++;                                      \
		item = (pattern) + (index);                     \
		}                                               \
	} while (0)

/*
 * Same as NEXT_ITEM_OF_PATTERN, but for the action array: skip
 * RTE_FLOW_ACTION_TYPE_VOID actions, advancing @index in place.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                \
	do {                                                    \
		act = (actions) + (index);                      \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
		(index)++;                                      \
		act = (actions) + (index);                      \
		}                                               \
	} while (0)

/* Max number of raw-pattern segments in a flex filter rule
 * (used by the flex-filter parser, not visible in this chunk).
 */
#define IGB_FLEX_RAW_NUM        12
51
/**
 * Please be aware there is an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally be in network order.
 */
59
60 /**
61  * Parse the rule to see if it is a n-tuple rule.
62  * And get the n-tuple filter info BTW.
63  * pattern:
64  * The first not void item can be ETH or IPV4.
65  * The second not void item must be IPV4 if the first one is ETH.
66  * The third not void item must be UDP or TCP or SCTP
67  * The next not void item must be END.
68  * action:
69  * The first not void action should be QUEUE.
70  * The next not void action should be END.
71  * pattern example:
72  * ITEM         Spec                    Mask
73  * ETH          NULL                    NULL
74  * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
75  *                      dst_addr 192.167.3.50   0xFFFFFFFF
76  *                      next_proto_id   17      0xFF
77  * UDP/TCP/     src_port        80      0xFFFF
78  * SCTP         dst_port        80      0xFFFF
79  * END
80  * other members in mask and spec should set to 0x00.
81  * item->last should be NULL.
82  */
83 static int
84 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
85                          const struct rte_flow_item pattern[],
86                          const struct rte_flow_action actions[],
87                          struct rte_eth_ntuple_filter *filter,
88                          struct rte_flow_error *error)
89 {
90         const struct rte_flow_item *item;
91         const struct rte_flow_action *act;
92         const struct rte_flow_item_ipv4 *ipv4_spec;
93         const struct rte_flow_item_ipv4 *ipv4_mask;
94         const struct rte_flow_item_tcp *tcp_spec;
95         const struct rte_flow_item_tcp *tcp_mask;
96         const struct rte_flow_item_udp *udp_spec;
97         const struct rte_flow_item_udp *udp_mask;
98         const struct rte_flow_item_sctp *sctp_spec;
99         const struct rte_flow_item_sctp *sctp_mask;
100         uint32_t index;
101
102         if (!pattern) {
103                 rte_flow_error_set(error,
104                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
105                         NULL, "NULL pattern.");
106                 return -rte_errno;
107         }
108
109         if (!actions) {
110                 rte_flow_error_set(error, EINVAL,
111                                    RTE_FLOW_ERROR_TYPE_ACTION_NUM,
112                                    NULL, "NULL action.");
113                 return -rte_errno;
114         }
115         if (!attr) {
116                 rte_flow_error_set(error, EINVAL,
117                                    RTE_FLOW_ERROR_TYPE_ATTR,
118                                    NULL, "NULL attribute.");
119                 return -rte_errno;
120         }
121
122         /* parse pattern */
123         index = 0;
124
125         /* the first not void item can be MAC or IPv4 */
126         NEXT_ITEM_OF_PATTERN(item, pattern, index);
127
128         if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
129             item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
130                 rte_flow_error_set(error, EINVAL,
131                         RTE_FLOW_ERROR_TYPE_ITEM,
132                         item, "Not supported by ntuple filter");
133                 return -rte_errno;
134         }
135         /* Skip Ethernet */
136         if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
137                 /*Not supported last point for range*/
138                 if (item->last) {
139                         rte_flow_error_set(error,
140                           EINVAL,
141                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
142                           item, "Not supported last point for range");
143                         return -rte_errno;
144                 }
145                 /* if the first item is MAC, the content should be NULL */
146                 if (item->spec || item->mask) {
147                         rte_flow_error_set(error, EINVAL,
148                                 RTE_FLOW_ERROR_TYPE_ITEM,
149                                 item, "Not supported by ntuple filter");
150                         return -rte_errno;
151                 }
152                 /* check if the next not void item is IPv4 */
153                 index++;
154                 NEXT_ITEM_OF_PATTERN(item, pattern, index);
155                 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
156                         rte_flow_error_set(error,
157                           EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
158                           item, "Not supported by ntuple filter");
159                         return -rte_errno;
160                 }
161         }
162
163         /* get the IPv4 info */
164         if (!item->spec || !item->mask) {
165                 rte_flow_error_set(error, EINVAL,
166                         RTE_FLOW_ERROR_TYPE_ITEM,
167                         item, "Invalid ntuple mask");
168                 return -rte_errno;
169         }
170         /* Not supported last point for range */
171         if (item->last) {
172                 rte_flow_error_set(error, EINVAL,
173                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
174                         item, "Not supported last point for range");
175                 return -rte_errno;
176         }
177
178         ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
179         /**
180          * Only support src & dst addresses, protocol,
181          * others should be masked.
182          */
183
184         if (ipv4_mask->hdr.version_ihl ||
185                 ipv4_mask->hdr.type_of_service ||
186                 ipv4_mask->hdr.total_length ||
187                 ipv4_mask->hdr.packet_id ||
188                 ipv4_mask->hdr.fragment_offset ||
189                 ipv4_mask->hdr.time_to_live ||
190                 ipv4_mask->hdr.hdr_checksum) {
191                 rte_flow_error_set(error,
192                         EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
193                         item, "Not supported by ntuple filter");
194                 return -rte_errno;
195         }
196
197         filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
198         filter->src_ip_mask = ipv4_mask->hdr.src_addr;
199         filter->proto_mask  = ipv4_mask->hdr.next_proto_id;
200
201         ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
202         filter->dst_ip = ipv4_spec->hdr.dst_addr;
203         filter->src_ip = ipv4_spec->hdr.src_addr;
204         filter->proto  = ipv4_spec->hdr.next_proto_id;
205
206         /* check if the next not void item is TCP or UDP or SCTP */
207         index++;
208         NEXT_ITEM_OF_PATTERN(item, pattern, index);
209         if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
210             item->type != RTE_FLOW_ITEM_TYPE_UDP &&
211             item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
212                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
213                 rte_flow_error_set(error, EINVAL,
214                         RTE_FLOW_ERROR_TYPE_ITEM,
215                         item, "Not supported by ntuple filter");
216                 return -rte_errno;
217         }
218
219         /* Not supported last point for range */
220         if (item->last) {
221                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
222                 rte_flow_error_set(error, EINVAL,
223                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
224                         item, "Not supported last point for range");
225                 return -rte_errno;
226         }
227
228         /* get the TCP/UDP/SCTP info */
229         if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
230                 if (item->spec && item->mask) {
231                         tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
232
233                         /**
234                          * Only support src & dst ports, tcp flags,
235                          * others should be masked.
236                          */
237                         if (tcp_mask->hdr.sent_seq ||
238                                 tcp_mask->hdr.recv_ack ||
239                                 tcp_mask->hdr.data_off ||
240                                 tcp_mask->hdr.rx_win ||
241                                 tcp_mask->hdr.cksum ||
242                                 tcp_mask->hdr.tcp_urp) {
243                                 memset(filter, 0,
244                                         sizeof(struct rte_eth_ntuple_filter));
245                                 rte_flow_error_set(error, EINVAL,
246                                         RTE_FLOW_ERROR_TYPE_ITEM,
247                                         item, "Not supported by ntuple filter");
248                                 return -rte_errno;
249                         }
250
251                         filter->dst_port_mask  = tcp_mask->hdr.dst_port;
252                         filter->src_port_mask  = tcp_mask->hdr.src_port;
253                         if (tcp_mask->hdr.tcp_flags == 0xFF) {
254                                 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
255                         } else if (!tcp_mask->hdr.tcp_flags) {
256                                 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
257                         } else {
258                                 memset(filter, 0,
259                                         sizeof(struct rte_eth_ntuple_filter));
260                                 rte_flow_error_set(error, EINVAL,
261                                         RTE_FLOW_ERROR_TYPE_ITEM,
262                                         item, "Not supported by ntuple filter");
263                                 return -rte_errno;
264                         }
265
266                         tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
267                         filter->dst_port  = tcp_spec->hdr.dst_port;
268                         filter->src_port  = tcp_spec->hdr.src_port;
269                         filter->tcp_flags = tcp_spec->hdr.tcp_flags;
270                 }
271         } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
272                 if (item->spec && item->mask) {
273                         udp_mask = (const struct rte_flow_item_udp *)item->mask;
274
275                         /**
276                          * Only support src & dst ports,
277                          * others should be masked.
278                          */
279                         if (udp_mask->hdr.dgram_len ||
280                             udp_mask->hdr.dgram_cksum) {
281                                 memset(filter, 0,
282                                         sizeof(struct rte_eth_ntuple_filter));
283                                 rte_flow_error_set(error, EINVAL,
284                                         RTE_FLOW_ERROR_TYPE_ITEM,
285                                         item, "Not supported by ntuple filter");
286                                 return -rte_errno;
287                         }
288
289                         filter->dst_port_mask = udp_mask->hdr.dst_port;
290                         filter->src_port_mask = udp_mask->hdr.src_port;
291
292                         udp_spec = (const struct rte_flow_item_udp *)item->spec;
293                         filter->dst_port = udp_spec->hdr.dst_port;
294                         filter->src_port = udp_spec->hdr.src_port;
295                 }
296         } else {
297                 if (item->spec && item->mask) {
298                         sctp_mask = (const struct rte_flow_item_sctp *)
299                                         item->mask;
300
301                         /**
302                          * Only support src & dst ports,
303                          * others should be masked.
304                          */
305                         if (sctp_mask->hdr.tag ||
306                             sctp_mask->hdr.cksum) {
307                                 memset(filter, 0,
308                                         sizeof(struct rte_eth_ntuple_filter));
309                                 rte_flow_error_set(error, EINVAL,
310                                         RTE_FLOW_ERROR_TYPE_ITEM,
311                                         item, "Not supported by ntuple filter");
312                                 return -rte_errno;
313                         }
314
315                         filter->dst_port_mask = sctp_mask->hdr.dst_port;
316                         filter->src_port_mask = sctp_mask->hdr.src_port;
317
318                         sctp_spec = (const struct rte_flow_item_sctp *)
319                                         item->spec;
320                         filter->dst_port = sctp_spec->hdr.dst_port;
321                         filter->src_port = sctp_spec->hdr.src_port;
322                 }
323         }
324         /* check if the next not void item is END */
325         index++;
326         NEXT_ITEM_OF_PATTERN(item, pattern, index);
327         if (item->type != RTE_FLOW_ITEM_TYPE_END) {
328                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
329                 rte_flow_error_set(error, EINVAL,
330                         RTE_FLOW_ERROR_TYPE_ITEM,
331                         item, "Not supported by ntuple filter");
332                 return -rte_errno;
333         }
334
335         /* parse action */
336         index = 0;
337
338         /**
339          * n-tuple only supports forwarding,
340          * check if the first not void action is QUEUE.
341          */
342         NEXT_ITEM_OF_ACTION(act, actions, index);
343         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
344                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
345                 rte_flow_error_set(error, EINVAL,
346                         RTE_FLOW_ERROR_TYPE_ACTION,
347                         item, "Not supported action.");
348                 return -rte_errno;
349         }
350         filter->queue =
351                 ((const struct rte_flow_action_queue *)act->conf)->index;
352
353         /* check if the next not void item is END */
354         index++;
355         NEXT_ITEM_OF_ACTION(act, actions, index);
356         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
357                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
358                 rte_flow_error_set(error, EINVAL,
359                         RTE_FLOW_ERROR_TYPE_ACTION,
360                         act, "Not supported action.");
361                 return -rte_errno;
362         }
363
364         /* parse attr */
365         /* must be input direction */
366         if (!attr->ingress) {
367                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368                 rte_flow_error_set(error, EINVAL,
369                                    RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
370                                    attr, "Only support ingress.");
371                 return -rte_errno;
372         }
373
374         /* not supported */
375         if (attr->egress) {
376                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
377                 rte_flow_error_set(error, EINVAL,
378                                    RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
379                                    attr, "Not support egress.");
380                 return -rte_errno;
381         }
382
383         if (attr->priority > 0xFFFF) {
384                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
385                 rte_flow_error_set(error, EINVAL,
386                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
387                                    attr, "Error priority.");
388                 return -rte_errno;
389         }
390         filter->priority = (uint16_t)attr->priority;
391
392         return 0;
393 }
394
395 /* a specific function for igb because the flags is specific */
396 static int
397 igb_parse_ntuple_filter(struct rte_eth_dev *dev,
398                           const struct rte_flow_attr *attr,
399                           const struct rte_flow_item pattern[],
400                           const struct rte_flow_action actions[],
401                           struct rte_eth_ntuple_filter *filter,
402                           struct rte_flow_error *error)
403 {
404         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
405         int ret;
406
407         MAC_TYPE_FILTER_SUP(hw->mac.type);
408
409         ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
410
411         if (ret)
412                 return ret;
413
414         /* Igb doesn't support many priorities. */
415         if (filter->priority > E1000_2TUPLE_MAX_PRI) {
416                 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
417                 rte_flow_error_set(error, EINVAL,
418                         RTE_FLOW_ERROR_TYPE_ITEM,
419                         NULL, "Priority not supported by ntuple filter");
420                 return -rte_errno;
421         }
422
423         if (hw->mac.type == e1000_82576) {
424                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
425                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
426                         rte_flow_error_set(error, EINVAL,
427                                 RTE_FLOW_ERROR_TYPE_ITEM,
428                                 NULL, "queue number not "
429                                 "supported by ntuple filter");
430                         return -rte_errno;
431                 }
432                 filter->flags |= RTE_5TUPLE_FLAGS;
433         } else {
434                 if (filter->src_ip_mask || filter->dst_ip_mask ||
435                         filter->src_port_mask) {
436                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
437                         rte_flow_error_set(error, EINVAL,
438                                 RTE_FLOW_ERROR_TYPE_ITEM,
439                                 NULL, "only two tuple are "
440                                 "supported by this filter");
441                         return -rte_errno;
442                 }
443                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
444                         memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
445                         rte_flow_error_set(error, EINVAL,
446                                 RTE_FLOW_ERROR_TYPE_ITEM,
447                                 NULL, "queue number not "
448                                 "supported by ntuple filter");
449                         return -rte_errno;
450                 }
451                 filter->flags |= RTE_2TUPLE_FLAGS;
452         }
453
454         return 0;
455 }
456
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 *
 * Returns 0 on success and fills in *filter; on failure sets *error
 * and returns -rte_errno.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info; both spec and mask are required. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	/* The ether_type field must be matched in full. */
	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	/* Pattern is big endian; the filter stores CPU order. */
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	index = 0;
	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
645
646 static int
647 igb_parse_ethertype_filter(struct rte_eth_dev *dev,
648                                  const struct rte_flow_attr *attr,
649                              const struct rte_flow_item pattern[],
650                              const struct rte_flow_action actions[],
651                              struct rte_eth_ethertype_filter *filter,
652                              struct rte_flow_error *error)
653 {
654         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
655         int ret;
656
657         MAC_TYPE_FILTER_SUP(hw->mac.type);
658
659         ret = cons_parse_ethertype_filter(attr, pattern,
660                                         actions, filter, error);
661
662         if (ret)
663                 return ret;
664
665         if (hw->mac.type == e1000_82576) {
666                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
667                         memset(filter, 0, sizeof(
668                                         struct rte_eth_ethertype_filter));
669                         rte_flow_error_set(error, EINVAL,
670                                 RTE_FLOW_ERROR_TYPE_ITEM,
671                                 NULL, "queue number not supported "
672                                         "by ethertype filter");
673                         return -rte_errno;
674                 }
675         } else {
676                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
677                         memset(filter, 0, sizeof(
678                                         struct rte_eth_ethertype_filter));
679                         rte_flow_error_set(error, EINVAL,
680                                 RTE_FLOW_ERROR_TYPE_ITEM,
681                                 NULL, "queue number not supported "
682                                         "by ethertype filter");
683                         return -rte_errno;
684                 }
685         }
686
687         if (filter->ether_type == ETHER_TYPE_IPv4 ||
688                 filter->ether_type == ETHER_TYPE_IPv6) {
689                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
690                 rte_flow_error_set(error, EINVAL,
691                         RTE_FLOW_ERROR_TYPE_ITEM,
692                         NULL, "IPv4/IPv6 not supported by ethertype filter");
693                 return -rte_errno;
694         }
695
696         if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
697                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
698                 rte_flow_error_set(error, EINVAL,
699                         RTE_FLOW_ERROR_TYPE_ITEM,
700                         NULL, "mac compare is unsupported");
701                 return -rte_errno;
702         }
703
704         if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
705                 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
706                 rte_flow_error_set(error, EINVAL,
707                         RTE_FLOW_ERROR_TYPE_ITEM,
708                         NULL, "drop option is unsupported");
709                 return -rte_errno;
710         }
711
712         return 0;
713 }
714
715 /**
716  * Parse the rule to see if it is a TCP SYN rule.
717  * And get the TCP SYN filter info BTW.
718  * pattern:
719  * The first not void item must be ETH.
720  * The second not void item must be IPV4 or IPV6.
721  * The third not void item must be TCP.
722  * The next not void item must be END.
723  * action:
724  * The first not void action should be QUEUE.
725  * The next not void action should be END.
726  * pattern example:
727  * ITEM         Spec                    Mask
728  * ETH          NULL                    NULL
729  * IPV4/IPV6    NULL                    NULL
730  * TCP          tcp_flags       0x02    0xFF
731  * END
732  * other members in mask and spec should set to 0x00.
733  * item->last should be NULL.
734  */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	/* Reject NULL arrays up front; NEXT_ITEM_OF_PATTERN/ACTION below
	 * would otherwise walk through a NULL pointer.
	 */
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN.
	 * Both spec and mask must be present before they are dereferenced.
	 */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	/* The spec must have SYN set and the mask must match ONLY the
	 * TCP flags field (exactly the SYN bit); any other non-zero mask
	 * field means the rule asks for more than this filter can do.
	 */
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest.
	 * 0 maps to low priority, ~0U to high; anything else is rejected.
	 */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
942
943 static int
944 igb_parse_syn_filter(struct rte_eth_dev *dev,
945                                  const struct rte_flow_attr *attr,
946                              const struct rte_flow_item pattern[],
947                              const struct rte_flow_action actions[],
948                              struct rte_eth_syn_filter *filter,
949                              struct rte_flow_error *error)
950 {
951         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
952         int ret;
953
954         MAC_TYPE_FILTER_SUP(hw->mac.type);
955
956         ret = cons_parse_syn_filter(attr, pattern,
957                                         actions, filter, error);
958
959         if (hw->mac.type == e1000_82576) {
960                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM_82576) {
961                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
962                         rte_flow_error_set(error, EINVAL,
963                                 RTE_FLOW_ERROR_TYPE_ITEM,
964                                 NULL, "queue number not "
965                                         "supported by syn filter");
966                         return -rte_errno;
967                 }
968         } else {
969                 if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
970                         memset(filter, 0, sizeof(struct rte_eth_syn_filter));
971                         rte_flow_error_set(error, EINVAL,
972                                 RTE_FLOW_ERROR_TYPE_ITEM,
973                                 NULL, "queue number not "
974                                         "supported by syn filter");
975                         return -rte_errno;
976                 }
977         }
978
979         if (ret)
980                 return ret;
981
982         return 0;
983 }
984
/**
 * Parse the rule to see if it is a flex byte rule,
 * and extract the flex byte filter info along the way.
 * pattern:
 * The first not void item must be RAW.
 * The second not void item can be RAW or END.
 * The third not void item can be RAW or END.
 * The last not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * RAW          relative        0               0x1
 *                      offset  0               0xFFFFFFFF
 *                      pattern {0x08, 0x06}            {0xFF, 0xFF}
 * RAW          relative        1               0x1
 *                      offset  100             0xFFFFFFFF
 *                      pattern {0x11, 0x22, 0x33}      {0xFF, 0xFF, 0xFF}
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
1008 static int
1009 cons_parse_flex_filter(const struct rte_flow_attr *attr,
1010                                 const struct rte_flow_item pattern[],
1011                                 const struct rte_flow_action actions[],
1012                                 struct rte_eth_flex_filter *filter,
1013                                 struct rte_flow_error *error)
1014 {
1015         const struct rte_flow_item *item;
1016         const struct rte_flow_action *act;
1017         const struct rte_flow_item_raw *raw_spec;
1018         const struct rte_flow_item_raw *raw_mask;
1019         const struct rte_flow_action_queue *act_q;
1020         uint32_t index, i, offset, total_offset;
1021         uint32_t max_offset = 0;
1022         int32_t shift, j, raw_index = 0;
1023         int32_t relative[IGB_FLEX_RAW_NUM] = {0};
1024         int32_t raw_offset[IGB_FLEX_RAW_NUM] = {0};
1025
1026         if (!pattern) {
1027                 rte_flow_error_set(error, EINVAL,
1028                                 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1029                                 NULL, "NULL pattern.");
1030                 return -rte_errno;
1031         }
1032
1033         if (!actions) {
1034                 rte_flow_error_set(error, EINVAL,
1035                                 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1036                                 NULL, "NULL action.");
1037                 return -rte_errno;
1038         }
1039
1040         if (!attr) {
1041                 rte_flow_error_set(error, EINVAL,
1042                                    RTE_FLOW_ERROR_TYPE_ATTR,
1043                                    NULL, "NULL attribute.");
1044                 return -rte_errno;
1045         }
1046
1047         /* parse pattern */
1048         index = 0;
1049
1050 item_loop:
1051
1052         /* the first not void item should be RAW */
1053         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1054         if (item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1055                 rte_flow_error_set(error, EINVAL,
1056                                 RTE_FLOW_ERROR_TYPE_ITEM,
1057                                 item, "Not supported by flex filter");
1058                 return -rte_errno;
1059         }
1060                 /*Not supported last point for range*/
1061         if (item->last) {
1062                 rte_flow_error_set(error, EINVAL,
1063                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1064                         item, "Not supported last point for range");
1065                 return -rte_errno;
1066         }
1067
1068         raw_spec = (const struct rte_flow_item_raw *)item->spec;
1069         raw_mask = (const struct rte_flow_item_raw *)item->mask;
1070
1071         if (!raw_mask->length ||
1072             !raw_mask->relative) {
1073                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1074                 rte_flow_error_set(error, EINVAL,
1075                                 RTE_FLOW_ERROR_TYPE_ITEM,
1076                                 item, "Not supported by flex filter");
1077                 return -rte_errno;
1078         }
1079
1080         if (raw_mask->offset)
1081                 offset = raw_spec->offset;
1082         else
1083                 offset = 0;
1084
1085         for (j = 0; j < raw_spec->length; j++) {
1086                 if (raw_mask->pattern[j] != 0xFF) {
1087                         memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1088                         rte_flow_error_set(error, EINVAL,
1089                                         RTE_FLOW_ERROR_TYPE_ITEM,
1090                                         item, "Not supported by flex filter");
1091                         return -rte_errno;
1092                 }
1093         }
1094
1095         total_offset = 0;
1096
1097         if (raw_spec->relative) {
1098                 for (j = raw_index; j > 0; j--) {
1099                         total_offset += raw_offset[j - 1];
1100                         if (!relative[j - 1])
1101                                 break;
1102                 }
1103                 if (total_offset + raw_spec->length + offset > max_offset)
1104                         max_offset = total_offset + raw_spec->length + offset;
1105         } else {
1106                 if (raw_spec->length + offset > max_offset)
1107                         max_offset = raw_spec->length + offset;
1108         }
1109
1110         if ((raw_spec->length + offset + total_offset) >
1111                         RTE_FLEX_FILTER_MAXLEN) {
1112                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1113                 rte_flow_error_set(error, EINVAL,
1114                                 RTE_FLOW_ERROR_TYPE_ITEM,
1115                                 item, "Not supported by flex filter");
1116                 return -rte_errno;
1117         }
1118
1119         if (raw_spec->relative == 0) {
1120                 for (j = 0; j < raw_spec->length; j++)
1121                         filter->bytes[offset + j] =
1122                         raw_spec->pattern[j];
1123                 j = offset / CHAR_BIT;
1124                 shift = offset % CHAR_BIT;
1125         } else {
1126                 for (j = 0; j < raw_spec->length; j++)
1127                         filter->bytes[total_offset + offset + j] =
1128                                 raw_spec->pattern[j];
1129                 j = (total_offset + offset) / CHAR_BIT;
1130                 shift = (total_offset + offset) % CHAR_BIT;
1131         }
1132
1133         i = 0;
1134
1135         for ( ; shift < CHAR_BIT; shift++) {
1136                 filter->mask[j] |= (0x80 >> shift);
1137                 i++;
1138                 if (i == raw_spec->length)
1139                         break;
1140                 if (shift == (CHAR_BIT - 1)) {
1141                         j++;
1142                         shift = -1;
1143                 }
1144         }
1145
1146         relative[raw_index] = raw_spec->relative;
1147         raw_offset[raw_index] = offset + raw_spec->length;
1148         raw_index++;
1149
1150         /* check if the next not void item is RAW */
1151         index++;
1152         NEXT_ITEM_OF_PATTERN(item, pattern, index);
1153         if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1154                 item->type != RTE_FLOW_ITEM_TYPE_END) {
1155                 rte_flow_error_set(error, EINVAL,
1156                                 RTE_FLOW_ERROR_TYPE_ITEM,
1157                                 item, "Not supported by flex filter");
1158                 return -rte_errno;
1159         }
1160
1161         /* go back to parser */
1162         if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1163                 /* if the item is RAW, the content should be parse */
1164                 goto item_loop;
1165         }
1166
1167         filter->len = RTE_ALIGN(max_offset, 8);
1168
1169         /* parse action */
1170         index = 0;
1171
1172         /* check if the first not void action is QUEUE. */
1173         NEXT_ITEM_OF_ACTION(act, actions, index);
1174         if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1175                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1176                 rte_flow_error_set(error, EINVAL,
1177                                 RTE_FLOW_ERROR_TYPE_ACTION,
1178                                 act, "Not supported action.");
1179                 return -rte_errno;
1180         }
1181
1182         act_q = (const struct rte_flow_action_queue *)act->conf;
1183         filter->queue = act_q->index;
1184
1185         /* check if the next not void item is END */
1186         index++;
1187         NEXT_ITEM_OF_ACTION(act, actions, index);
1188         if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1189                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1190                 rte_flow_error_set(error, EINVAL,
1191                                 RTE_FLOW_ERROR_TYPE_ACTION,
1192                                 act, "Not supported action.");
1193                 return -rte_errno;
1194         }
1195
1196         /* parse attr */
1197         /* must be input direction */
1198         if (!attr->ingress) {
1199                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1200                 rte_flow_error_set(error, EINVAL,
1201                         RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1202                         attr, "Only support ingress.");
1203                 return -rte_errno;
1204         }
1205
1206         /* not supported */
1207         if (attr->egress) {
1208                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1209                 rte_flow_error_set(error, EINVAL,
1210                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1211                         attr, "Not support egress.");
1212                 return -rte_errno;
1213         }
1214
1215         if (attr->priority > 0xFFFF) {
1216                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1217                 rte_flow_error_set(error, EINVAL,
1218                                    RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1219                                    attr, "Error priority.");
1220                 return -rte_errno;
1221         }
1222
1223         filter->priority = (uint16_t)attr->priority;
1224
1225         return 0;
1226 }
1227
1228 static int
1229 igb_parse_flex_filter(struct rte_eth_dev *dev,
1230                                  const struct rte_flow_attr *attr,
1231                              const struct rte_flow_item pattern[],
1232                              const struct rte_flow_action actions[],
1233                              struct rte_eth_flex_filter *filter,
1234                              struct rte_flow_error *error)
1235 {
1236         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1237         int ret;
1238
1239         MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
1240
1241         ret = cons_parse_flex_filter(attr, pattern,
1242                                         actions, filter, error);
1243
1244         if (filter->queue >= IGB_MAX_RX_QUEUE_NUM) {
1245                 memset(filter, 0, sizeof(struct rte_eth_flex_filter));
1246                 rte_flow_error_set(error, EINVAL,
1247                         RTE_FLOW_ERROR_TYPE_ITEM,
1248                         NULL, "queue number not supported by flex filter");
1249                 return -rte_errno;
1250         }
1251
1252         if (filter->len == 0 || filter->len > E1000_MAX_FLEX_FILTER_LEN ||
1253                 filter->len % sizeof(uint64_t) != 0) {
1254                 PMD_DRV_LOG(ERR, "filter's length is out of range");
1255                 return -EINVAL;
1256         }
1257
1258         if (filter->priority > E1000_MAX_FLEX_FILTER_PRI) {
1259                 PMD_DRV_LOG(ERR, "filter's priority is out of range");
1260                 return -EINVAL;
1261         }
1262
1263         if (ret)
1264                 return ret;
1265
1266         return 0;
1267 }
1268
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the filter type it hits first.
 * So, the sequence of the parser calls matters.
 */
1275 static struct rte_flow *
1276 igb_flow_create(struct rte_eth_dev *dev,
1277                   const struct rte_flow_attr *attr,
1278                   const struct rte_flow_item pattern[],
1279                   const struct rte_flow_action actions[],
1280                   struct rte_flow_error *error)
1281 {
1282         int ret;
1283         struct rte_eth_ntuple_filter ntuple_filter;
1284         struct rte_eth_ethertype_filter ethertype_filter;
1285         struct rte_eth_syn_filter syn_filter;
1286         struct rte_eth_flex_filter flex_filter;
1287         struct rte_flow *flow = NULL;
1288         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1289         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1290         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1291         struct igb_flex_filter_ele *flex_filter_ptr;
1292         struct igb_flow_mem *igb_flow_mem_ptr;
1293
1294         flow = rte_zmalloc("igb_rte_flow", sizeof(struct rte_flow), 0);
1295         if (!flow) {
1296                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1297                 return (struct rte_flow *)flow;
1298         }
1299         igb_flow_mem_ptr = rte_zmalloc("igb_flow_mem",
1300                         sizeof(struct igb_flow_mem), 0);
1301         if (!igb_flow_mem_ptr) {
1302                 PMD_DRV_LOG(ERR, "failed to allocate memory");
1303                 rte_free(flow);
1304                 return NULL;
1305         }
1306         igb_flow_mem_ptr->flow = flow;
1307         igb_flow_mem_ptr->dev = dev;
1308         TAILQ_INSERT_TAIL(&igb_flow_list,
1309                                 igb_flow_mem_ptr, entries);
1310
1311         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1312         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1313                         actions, &ntuple_filter, error);
1314         if (!ret) {
1315                 ret = igb_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
1316                 if (!ret) {
1317                         ntuple_filter_ptr = rte_zmalloc("igb_ntuple_filter",
1318                                 sizeof(struct igb_ntuple_filter_ele), 0);
1319                         rte_memcpy(&ntuple_filter_ptr->filter_info,
1320                                 &ntuple_filter,
1321                                 sizeof(struct rte_eth_ntuple_filter));
1322                         TAILQ_INSERT_TAIL(&igb_filter_ntuple_list,
1323                                 ntuple_filter_ptr, entries);
1324                         flow->rule = ntuple_filter_ptr;
1325                         flow->filter_type = RTE_ETH_FILTER_NTUPLE;
1326                         return flow;
1327                 }
1328                 goto out;
1329         }
1330
1331         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1332         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1333                                 actions, &ethertype_filter, error);
1334         if (!ret) {
1335                 ret = igb_add_del_ethertype_filter(dev,
1336                                 &ethertype_filter, TRUE);
1337                 if (!ret) {
1338                         ethertype_filter_ptr = rte_zmalloc(
1339                                 "igb_ethertype_filter",
1340                                 sizeof(struct igb_ethertype_filter_ele), 0);
1341                         rte_memcpy(&ethertype_filter_ptr->filter_info,
1342                                 &ethertype_filter,
1343                                 sizeof(struct rte_eth_ethertype_filter));
1344                         TAILQ_INSERT_TAIL(&igb_filter_ethertype_list,
1345                                 ethertype_filter_ptr, entries);
1346                         flow->rule = ethertype_filter_ptr;
1347                         flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
1348                         return flow;
1349                 }
1350                 goto out;
1351         }
1352
1353         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1354         ret = igb_parse_syn_filter(dev, attr, pattern,
1355                                 actions, &syn_filter, error);
1356         if (!ret) {
1357                 ret = eth_igb_syn_filter_set(dev, &syn_filter, TRUE);
1358                 if (!ret) {
1359                         syn_filter_ptr = rte_zmalloc("igb_syn_filter",
1360                                 sizeof(struct igb_eth_syn_filter_ele), 0);
1361                         rte_memcpy(&syn_filter_ptr->filter_info,
1362                                 &syn_filter,
1363                                 sizeof(struct rte_eth_syn_filter));
1364                         TAILQ_INSERT_TAIL(&igb_filter_syn_list,
1365                                 syn_filter_ptr,
1366                                 entries);
1367                         flow->rule = syn_filter_ptr;
1368                         flow->filter_type = RTE_ETH_FILTER_SYN;
1369                         return flow;
1370                 }
1371                 goto out;
1372         }
1373
1374         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1375         ret = igb_parse_flex_filter(dev, attr, pattern,
1376                                         actions, &flex_filter, error);
1377         if (!ret) {
1378                 ret = eth_igb_add_del_flex_filter(dev, &flex_filter, TRUE);
1379                 if (!ret) {
1380                         flex_filter_ptr = rte_zmalloc("igb_flex_filter",
1381                                 sizeof(struct igb_flex_filter_ele), 0);
1382                         rte_memcpy(&flex_filter_ptr->filter_info,
1383                                 &flex_filter,
1384                                 sizeof(struct rte_eth_flex_filter));
1385                         TAILQ_INSERT_TAIL(&igb_filter_flex_list,
1386                                 flex_filter_ptr, entries);
1387                         flow->rule = flex_filter_ptr;
1388                         flow->filter_type = RTE_ETH_FILTER_FLEXIBLE;
1389                         return flow;
1390                 }
1391         }
1392
1393 out:
1394         TAILQ_REMOVE(&igb_flow_list,
1395                 igb_flow_mem_ptr, entries);
1396         rte_flow_error_set(error, -ret,
1397                            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1398                            "Failed to create flow.");
1399         rte_free(igb_flow_mem_ptr);
1400         rte_free(flow);
1401         return NULL;
1402 }
1403
/**
 * Check if the flow rule is supported by igb.
 * It only checks the format. It doesn't guarantee the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
1409 static int
1410 igb_flow_validate(__rte_unused struct rte_eth_dev *dev,
1411                 const struct rte_flow_attr *attr,
1412                 const struct rte_flow_item pattern[],
1413                 const struct rte_flow_action actions[],
1414                 struct rte_flow_error *error)
1415 {
1416         struct rte_eth_ntuple_filter ntuple_filter;
1417         struct rte_eth_ethertype_filter ethertype_filter;
1418         struct rte_eth_syn_filter syn_filter;
1419         struct rte_eth_flex_filter flex_filter;
1420         int ret;
1421
1422         memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1423         ret = igb_parse_ntuple_filter(dev, attr, pattern,
1424                                 actions, &ntuple_filter, error);
1425         if (!ret)
1426                 return 0;
1427
1428         memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1429         ret = igb_parse_ethertype_filter(dev, attr, pattern,
1430                                 actions, &ethertype_filter, error);
1431         if (!ret)
1432                 return 0;
1433
1434         memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1435         ret = igb_parse_syn_filter(dev, attr, pattern,
1436                                 actions, &syn_filter, error);
1437         if (!ret)
1438                 return 0;
1439
1440         memset(&flex_filter, 0, sizeof(struct rte_eth_flex_filter));
1441         ret = igb_parse_flex_filter(dev, attr, pattern,
1442                                 actions, &flex_filter, error);
1443
1444         return ret;
1445 }
1446
1447 /* Destroy a flow rule on igb. */
1448 static int
1449 igb_flow_destroy(struct rte_eth_dev *dev,
1450                 struct rte_flow *flow,
1451                 struct rte_flow_error *error)
1452 {
1453         int ret;
1454         struct rte_flow *pmd_flow = flow;
1455         enum rte_filter_type filter_type = pmd_flow->filter_type;
1456         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1457         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1458         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1459         struct igb_flex_filter_ele *flex_filter_ptr;
1460         struct igb_flow_mem *igb_flow_mem_ptr;
1461
1462         switch (filter_type) {
1463         case RTE_ETH_FILTER_NTUPLE:
1464                 ntuple_filter_ptr = (struct igb_ntuple_filter_ele *)
1465                                         pmd_flow->rule;
1466                 ret = igb_add_del_ntuple_filter(dev,
1467                                 &ntuple_filter_ptr->filter_info, FALSE);
1468                 if (!ret) {
1469                         TAILQ_REMOVE(&igb_filter_ntuple_list,
1470                         ntuple_filter_ptr, entries);
1471                         rte_free(ntuple_filter_ptr);
1472                 }
1473                 break;
1474         case RTE_ETH_FILTER_ETHERTYPE:
1475                 ethertype_filter_ptr = (struct igb_ethertype_filter_ele *)
1476                                         pmd_flow->rule;
1477                 ret = igb_add_del_ethertype_filter(dev,
1478                                 &ethertype_filter_ptr->filter_info, FALSE);
1479                 if (!ret) {
1480                         TAILQ_REMOVE(&igb_filter_ethertype_list,
1481                                 ethertype_filter_ptr, entries);
1482                         rte_free(ethertype_filter_ptr);
1483                 }
1484                 break;
1485         case RTE_ETH_FILTER_SYN:
1486                 syn_filter_ptr = (struct igb_eth_syn_filter_ele *)
1487                                 pmd_flow->rule;
1488                 ret = eth_igb_syn_filter_set(dev,
1489                                 &syn_filter_ptr->filter_info, FALSE);
1490                 if (!ret) {
1491                         TAILQ_REMOVE(&igb_filter_syn_list,
1492                                 syn_filter_ptr, entries);
1493                         rte_free(syn_filter_ptr);
1494                 }
1495                 break;
1496         case RTE_ETH_FILTER_FLEXIBLE:
1497                 flex_filter_ptr = (struct igb_flex_filter_ele *)
1498                                 pmd_flow->rule;
1499                 ret = eth_igb_add_del_flex_filter(dev,
1500                                 &flex_filter_ptr->filter_info, FALSE);
1501                 if (!ret) {
1502                         TAILQ_REMOVE(&igb_filter_flex_list,
1503                                 flex_filter_ptr, entries);
1504                         rte_free(flex_filter_ptr);
1505                 }
1506                 break;
1507         default:
1508                 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
1509                             filter_type);
1510                 ret = -EINVAL;
1511                 break;
1512         }
1513
1514         if (ret) {
1515                 rte_flow_error_set(error, EINVAL,
1516                                 RTE_FLOW_ERROR_TYPE_HANDLE,
1517                                 NULL, "Failed to destroy flow");
1518                 return ret;
1519         }
1520
1521         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1522                 if (igb_flow_mem_ptr->flow == pmd_flow) {
1523                         TAILQ_REMOVE(&igb_flow_list,
1524                                 igb_flow_mem_ptr, entries);
1525                         rte_free(igb_flow_mem_ptr);
1526                 }
1527         }
1528         rte_free(flow);
1529
1530         return ret;
1531 }
1532
1533 /* remove all the n-tuple filters */
1534 static void
1535 igb_clear_all_ntuple_filter(struct rte_eth_dev *dev)
1536 {
1537         struct e1000_filter_info *filter_info =
1538                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1539         struct e1000_5tuple_filter *p_5tuple;
1540         struct e1000_2tuple_filter *p_2tuple;
1541
1542         while ((p_5tuple = TAILQ_FIRST(&filter_info->fivetuple_list)))
1543                 igb_delete_5tuple_filter_82576(dev, p_5tuple);
1544
1545         while ((p_2tuple = TAILQ_FIRST(&filter_info->twotuple_list)))
1546                 igb_delete_2tuple_filter(dev, p_2tuple);
1547 }
1548
1549 /* remove all the ether type filters */
1550 static void
1551 igb_clear_all_ethertype_filter(struct rte_eth_dev *dev)
1552 {
1553         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1554         struct e1000_filter_info *filter_info =
1555                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1556         int i;
1557
1558         for (i = 0; i < E1000_MAX_ETQF_FILTERS; i++) {
1559                 if (filter_info->ethertype_mask & (1 << i)) {
1560                         (void)igb_ethertype_filter_remove(filter_info,
1561                                                             (uint8_t)i);
1562                         E1000_WRITE_REG(hw, E1000_ETQF(i), 0);
1563                         E1000_WRITE_FLUSH(hw);
1564                 }
1565         }
1566 }
1567
1568 /* remove the SYN filter */
1569 static void
1570 igb_clear_syn_filter(struct rte_eth_dev *dev)
1571 {
1572         struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1573         struct e1000_filter_info *filter_info =
1574                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1575
1576         if (filter_info->syn_info & E1000_SYN_FILTER_ENABLE) {
1577                 filter_info->syn_info = 0;
1578                 E1000_WRITE_REG(hw, E1000_SYNQF(0), 0);
1579                 E1000_WRITE_FLUSH(hw);
1580         }
1581 }
1582
1583 /* remove all the flex filters */
1584 static void
1585 igb_clear_all_flex_filter(struct rte_eth_dev *dev)
1586 {
1587         struct e1000_filter_info *filter_info =
1588                 E1000_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
1589         struct e1000_flex_filter *flex_filter;
1590
1591         while ((flex_filter = TAILQ_FIRST(&filter_info->flex_list)))
1592                 igb_remove_flex_filter(dev, flex_filter);
1593 }
1594
1595 void
1596 igb_filterlist_flush(struct rte_eth_dev *dev)
1597 {
1598         struct igb_ntuple_filter_ele *ntuple_filter_ptr;
1599         struct igb_ethertype_filter_ele *ethertype_filter_ptr;
1600         struct igb_eth_syn_filter_ele *syn_filter_ptr;
1601         struct igb_flex_filter_ele *flex_filter_ptr;
1602         struct igb_flow_mem *igb_flow_mem_ptr;
1603         enum rte_filter_type filter_type;
1604         struct rte_flow *pmd_flow;
1605
1606         TAILQ_FOREACH(igb_flow_mem_ptr, &igb_flow_list, entries) {
1607                 if (igb_flow_mem_ptr->dev == dev) {
1608                         pmd_flow = igb_flow_mem_ptr->flow;
1609                         filter_type = pmd_flow->filter_type;
1610
1611                         switch (filter_type) {
1612                         case RTE_ETH_FILTER_NTUPLE:
1613                                 ntuple_filter_ptr =
1614                                 (struct igb_ntuple_filter_ele *)
1615                                         pmd_flow->rule;
1616                                 TAILQ_REMOVE(&igb_filter_ntuple_list,
1617                                                 ntuple_filter_ptr, entries);
1618                                 rte_free(ntuple_filter_ptr);
1619                                 break;
1620                         case RTE_ETH_FILTER_ETHERTYPE:
1621                                 ethertype_filter_ptr =
1622                                 (struct igb_ethertype_filter_ele *)
1623                                         pmd_flow->rule;
1624                                 TAILQ_REMOVE(&igb_filter_ethertype_list,
1625                                                 ethertype_filter_ptr, entries);
1626                                 rte_free(ethertype_filter_ptr);
1627                                 break;
1628                         case RTE_ETH_FILTER_SYN:
1629                                 syn_filter_ptr =
1630                                         (struct igb_eth_syn_filter_ele *)
1631                                                 pmd_flow->rule;
1632                                 TAILQ_REMOVE(&igb_filter_syn_list,
1633                                                 syn_filter_ptr, entries);
1634                                 rte_free(syn_filter_ptr);
1635                                 break;
1636                         case RTE_ETH_FILTER_FLEXIBLE:
1637                                 flex_filter_ptr =
1638                                         (struct igb_flex_filter_ele *)
1639                                                 pmd_flow->rule;
1640                                 TAILQ_REMOVE(&igb_filter_flex_list,
1641                                                 flex_filter_ptr, entries);
1642                                 rte_free(flex_filter_ptr);
1643                                 break;
1644                         default:
1645                                 PMD_DRV_LOG(WARNING, "Filter type"
1646                                         "(%d) not supported", filter_type);
1647                                 break;
1648                         }
1649                         TAILQ_REMOVE(&igb_flow_list,
1650                                  igb_flow_mem_ptr,
1651                                  entries);
1652                         rte_free(igb_flow_mem_ptr->flow);
1653                         rte_free(igb_flow_mem_ptr);
1654                 }
1655         }
1656 }
1657
/*  Destroy all flow rules associated with a port on igb. */
static int
igb_flow_flush(struct rte_eth_dev *dev,
		__rte_unused struct rte_flow_error *error)
{
	/* Clear every HW filter category first, then drop all the
	 * software bookkeeping for this port in one pass.
	 */
	igb_clear_all_ntuple_filter(dev);
	igb_clear_all_ethertype_filter(dev);
	igb_clear_syn_filter(dev);
	igb_clear_all_flex_filter(dev);
	igb_filterlist_flush(dev);

	/* None of the helpers report failure, so flush always succeeds. */
	return 0;
}
1671
/* rte_flow ops exported by the igb PMD (no query support). */
const struct rte_flow_ops igb_flow_ops = {
	.validate = igb_flow_validate,
	.create = igb_flow_create,
	.destroy = igb_flow_destroy,
	.flush = igb_flow_flush,
};