net/txgbe: support syn filter add and delete
[dpdk.git] / drivers/net/txgbe/txgbe_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <sys/queue.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7

/**
 * An endless loop cannot happen under the following assumptions:
 * 1. there is at least one non-void item (END)
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
                const struct rte_flow_item pattern[],
                const struct rte_flow_item *cur)
{
        const struct rte_flow_item *next =
                cur ? cur + 1 : &pattern[0];
        while (1) {
                if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return next;
                next++;
        }
}

static inline
const struct rte_flow_action *next_no_void_action(
                const struct rte_flow_action actions[],
                const struct rte_flow_action *cur)
{
        const struct rte_flow_action *next =
                cur ? cur + 1 : &actions[0];
        while (1) {
                if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return next;
                next++;
        }
}

/**
 * Please be aware that there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */

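/*
 * Illustrative sketch, not part of the driver: following the convention
 * above, an application would keep item spec fields in network order while
 * attribute fields stay in CPU order, for example:
 *
 *   struct rte_flow_item_ipv4 ipv4_spec = {
 *           .hdr.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *           .hdr.next_proto_id = IPPROTO_UDP,
 *   };
 *   struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 */
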
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill in the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
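/*
 * Illustrative sketch, not part of the driver: a rule equivalent to the
 * example above could be requested through testpmd with a command along
 * these lines (one command, wrapped here for readability; queue index 3
 * is an arbitrary choice):
 *
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
 *        dst is 192.167.3.50 proto is 17 / udp src is 80 dst is 80 / end
 *        actions queue index 3 / end
 */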
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
                         const struct rte_flow_item pattern[],
                         const struct rte_flow_action actions[],
                         struct rte_eth_ntuple_filter *filter,
                         struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_ipv4 *ipv4_spec;
        const struct rte_flow_item_ipv4 *ipv4_mask;
        const struct rte_flow_item_tcp *tcp_spec;
        const struct rte_flow_item_tcp *tcp_mask;
        const struct rte_flow_item_udp *udp_spec;
        const struct rte_flow_item_udp *udp_mask;
        const struct rte_flow_item_sctp *sctp_spec;
        const struct rte_flow_item_sctp *sctp_mask;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_item_vlan *vlan_spec;
        const struct rte_flow_item_vlan *vlan_mask;
        struct rte_flow_item_eth eth_null;
        struct rte_flow_item_vlan vlan_null;

        if (!pattern) {
                rte_flow_error_set(error,
                        EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }
        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
        memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

        /* the first not void item can be MAC or IPv4 */
        item = next_no_void_pattern(pattern, NULL);

        if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }
        /* Skip Ethernet */
        if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
                eth_spec = item->spec;
                eth_mask = item->mask;
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                          EINVAL,
                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                          item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* if the first item is MAC, the content should be NULL */
                if ((item->spec || item->mask) &&
                        (memcmp(eth_spec, &eth_null,
                                sizeof(struct rte_flow_item_eth)) ||
                         memcmp(eth_mask, &eth_null,
                                sizeof(struct rte_flow_item_eth)))) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 or Vlan */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
                        item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
                vlan_spec = item->spec;
                vlan_mask = item->mask;
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }
                /* the content should be NULL */
                if ((item->spec || item->mask) &&
                        (memcmp(vlan_spec, &vlan_null,
                                sizeof(struct rte_flow_item_vlan)) ||
                         memcmp(vlan_mask, &vlan_null,
                                sizeof(struct rte_flow_item_vlan)))) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                /* check if the next not void item is IPv4 */
                item = next_no_void_pattern(pattern, item);
                if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                        rte_flow_error_set(error,
                          EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                          item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
        }

        if (item->mask) {
                /* get the IPv4 info */
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ntuple mask");
                        return -rte_errno;
                }
                /* Not supported last point for range */
                if (item->last) {
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                item, "Not supported last point for range");
                        return -rte_errno;
                }

                ipv4_mask = item->mask;
                /**
                 * Only support src & dst addresses, protocol,
                 * others should be masked.
                 */
                if (ipv4_mask->hdr.version_ihl ||
                    ipv4_mask->hdr.type_of_service ||
                    ipv4_mask->hdr.total_length ||
                    ipv4_mask->hdr.packet_id ||
                    ipv4_mask->hdr.fragment_offset ||
                    ipv4_mask->hdr.time_to_live ||
                    ipv4_mask->hdr.hdr_checksum) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                if ((ipv4_mask->hdr.src_addr != 0 &&
                        ipv4_mask->hdr.src_addr != UINT32_MAX) ||
                        (ipv4_mask->hdr.dst_addr != 0 &&
                        ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
                        (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
                        ipv4_mask->hdr.next_proto_id != 0)) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
                filter->src_ip_mask = ipv4_mask->hdr.src_addr;
                filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

                ipv4_spec = item->spec;
                filter->dst_ip = ipv4_spec->hdr.dst_addr;
                filter->src_ip = ipv4_spec->hdr.src_addr;
                filter->proto  = ipv4_spec->hdr.next_proto_id;
        }

        /* check if the next not void item is TCP or UDP */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

        if (item->type != RTE_FLOW_ITEM_TYPE_END &&
                (!item->spec && !item->mask)) {
                goto action;
        }

        /* get the TCP/UDP/SCTP info */
        if (item->type != RTE_FLOW_ITEM_TYPE_END &&
                (!item->spec || !item->mask)) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Invalid ntuple mask");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
                tcp_mask = item->mask;

                /**
                 * Only support src & dst ports, tcp flags,
                 * others should be masked.
                 */
                if (tcp_mask->hdr.sent_seq ||
                    tcp_mask->hdr.recv_ack ||
                    tcp_mask->hdr.data_off ||
                    tcp_mask->hdr.rx_win ||
                    tcp_mask->hdr.cksum ||
                    tcp_mask->hdr.tcp_urp) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                if ((tcp_mask->hdr.src_port != 0 &&
                        tcp_mask->hdr.src_port != UINT16_MAX) ||
                        (tcp_mask->hdr.dst_port != 0 &&
                        tcp_mask->hdr.dst_port != UINT16_MAX)) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask  = tcp_mask->hdr.dst_port;
                filter->src_port_mask  = tcp_mask->hdr.src_port;
                if (tcp_mask->hdr.tcp_flags == 0xFF) {
                        filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else if (!tcp_mask->hdr.tcp_flags) {
                        filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
                } else {
                        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                tcp_spec = item->spec;
                filter->dst_port  = tcp_spec->hdr.dst_port;
                filter->src_port  = tcp_spec->hdr.src_port;
                filter->tcp_flags = tcp_spec->hdr.tcp_flags;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
                udp_mask = item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (udp_mask->hdr.dgram_len ||
                    udp_mask->hdr.dgram_cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }
                if ((udp_mask->hdr.src_port != 0 &&
                        udp_mask->hdr.src_port != UINT16_MAX) ||
                        (udp_mask->hdr.dst_port != 0 &&
                        udp_mask->hdr.dst_port != UINT16_MAX)) {
                        rte_flow_error_set(error,
                                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = udp_mask->hdr.dst_port;
                filter->src_port_mask = udp_mask->hdr.src_port;

                udp_spec = item->spec;
                filter->dst_port = udp_spec->hdr.dst_port;
                filter->src_port = udp_spec->hdr.src_port;
        } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
                sctp_mask = item->mask;

                /**
                 * Only support src & dst ports,
                 * others should be masked.
                 */
                if (sctp_mask->hdr.tag ||
                    sctp_mask->hdr.cksum) {
                        memset(filter, 0,
                                sizeof(struct rte_eth_ntuple_filter));
                        rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ntuple filter");
                        return -rte_errno;
                }

                filter->dst_port_mask = sctp_mask->hdr.dst_port;
                filter->src_port_mask = sctp_mask->hdr.src_port;

                sctp_spec = item->spec;
                filter->dst_port = sctp_spec->hdr.dst_port;
                filter->src_port = sctp_spec->hdr.src_port;
        } else {
                goto action;
        }

        /* check if the next not void item is END */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ntuple filter");
                return -rte_errno;
        }

action:

        /**
         * n-tuple only supports forwarding,
         * check if the first not void action is QUEUE.
         */
        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }
        filter->queue =
                ((const struct rte_flow_action_queue *)act->conf)->index;

        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ACTION,
                        act, "Not supported action.");
                return -rte_errno;
        }

        /* parse attr */
        /* must be input direction */
        if (!attr->ingress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->egress) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "Not support egress.");
                return -rte_errno;
        }

        /* not supported */
        if (attr->transfer) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                   attr, "No support for transfer.");
                return -rte_errno;
        }

        if (attr->priority > 0xFFFF) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "Error priority.");
                return -rte_errno;
        }
        filter->priority = (uint16_t)attr->priority;
        if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
                attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
                filter->priority = 1;

        return 0;
}

/* a specific function for txgbe because the flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[],
                          struct rte_eth_ntuple_filter *filter,
                          struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

        if (ret)
                return ret;

        /* txgbe doesn't support tcp flags */
        if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* txgbe doesn't support many priorities */
        if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
            filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "Priority not supported by ntuple filter");
                return -rte_errno;
        }

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                   NULL, "Not supported by ntuple filter");
                return -rte_errno;
        }

        /* fixed value for txgbe */
        filter->flags = RTE_5TUPLE_FLAGS;
        return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and fill in the ethertype filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE or DROP.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
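/*
 * Illustrative sketch, not part of the driver: a rule equivalent to the
 * example above could be requested through testpmd with a command along
 * these lines (one command, wrapped here for readability; queue index 3
 * is an arbitrary choice):
 *
 *   flow create 0 ingress pattern eth type is 0x0807 / end
 *        actions queue index 3 / end
 */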
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
                            const struct rte_flow_item *pattern,
                            const struct rte_flow_action *actions,
                            struct rte_eth_ethertype_filter *filter,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *act;
        const struct rte_flow_item_eth *eth_spec;
        const struct rte_flow_item_eth *eth_mask;
        const struct rte_flow_action_queue *act_q;

        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        item = next_no_void_pattern(pattern, NULL);
        /* The first non-void item should be MAC. */
        if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                        item, "Not supported last point for range");
                return -rte_errno;
        }

        /* Get the MAC info. */
        if (!item->spec || !item->mask) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter");
                return -rte_errno;
        }

        eth_spec = item->spec;
        eth_mask = item->mask;

        /* Mask bits of source MAC address must be full of 0.
         * Mask bits of destination MAC address must be full
         * of 1 or full of 0.
         */
        if (!rte_is_zero_ether_addr(&eth_mask->src) ||
            (!rte_is_zero_ether_addr(&eth_mask->dst) &&
             !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ether address mask");
                return -rte_errno;
        }

        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Invalid ethertype mask");
                return -rte_errno;
        }

        /* If mask bits of destination MAC address
         * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
         */
        if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
                filter->mac_addr = eth_spec->dst;
                filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
        } else {
                filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
        }
        filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

        /* Check if the next non-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ITEM,
                                item, "Not supported by ethertype filter.");
                return -rte_errno;
        }

        /* Parse action */

        act = next_no_void_action(actions, NULL);
        if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
            act->type != RTE_FLOW_ACTION_TYPE_DROP) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                act_q = (const struct rte_flow_action_queue *)act->conf;
                filter->queue = act_q->index;
        } else {
                filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
        }

        /* Check if the next non-void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ACTION,
                                act, "Not supported action.");
                return -rte_errno;
        }

        /* Parse attr */
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                attr, "Not support egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->transfer) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                attr, "No support for transfer.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                attr, "Not support priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                attr, "Not support group.");
                return -rte_errno;
        }

        return 0;
}

static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             struct rte_eth_ethertype_filter *filter,
                             struct rte_flow_error *error)
{
        int ret;

        ret = cons_parse_ethertype_filter(attr, pattern,
                                        actions, filter, error);

        if (ret)
                return ret;

        if (filter->queue >= dev->data->nb_rx_queues) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "queue index much too big");
                return -rte_errno;
        }

        if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
                filter->ether_type == RTE_ETHER_TYPE_IPV6) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "IPv4/IPv6 not supported by ethertype filter");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "mac compare is unsupported");
                return -rte_errno;
        }

        if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
                memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        NULL, "drop option is unsupported");
                return -rte_errno;
        }

        return 0;
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the first filter it hits.
 * So, the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct rte_flow *flow = NULL;
        return flow;
}

/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct rte_eth_ntuple_filter ntuple_filter;
        struct rte_eth_ethertype_filter ethertype_filter;
        int ret = 0;

        memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
        ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
                                actions, &ntuple_filter, error);
        if (!ret)
                return 0;

        memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
        ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
                                actions, &ethertype_filter, error);
        if (!ret)
                return 0;

        return ret;
}
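
/*
 * Illustrative sketch, not part of the driver: from the application side,
 * the checks above are reached through the generic rte_flow API, e.g.:
 *
 *   struct rte_flow_error err;
 *   int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *
 * A zero return only means the rule format is accepted; it does not
 * guarantee that a later rte_flow_create() will find room in the hardware.
 */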

/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
                struct rte_flow *flow,
                struct rte_flow_error *error)
{
        int ret = 0;

        return ret;
}

/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        int ret = 0;

        return ret;
}

const struct rte_flow_ops txgbe_flow_ops = {
        .validate = txgbe_flow_validate,
        .create = txgbe_flow_create,
        .destroy = txgbe_flow_destroy,
        .flush = txgbe_flow_flush,
};