drivers/net/txgbe/txgbe_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <sys/queue.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7

/**
 * An endless loop can never happen given the assumptions below:
 * 1. there is at least one non-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}

/**
 * Please be aware of the assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern describes packets, it normally follows
 * network byte order.
 */

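/*
 * For illustration only (an application-side sketch, not part of this
 * driver): a pattern field such as the EtherType is supplied in network
 * order, while an action parameter such as the target queue index stays
 * in CPU order.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP),
 *	};
 *	struct rte_flow_action_queue queue = {
 *		.index = 3,
 *	};
 */
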
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info as well.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
			  EINVAL,
			  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			  item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or Vlan */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
			  EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			  item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		ipv4_mask = item->mask;
		/**
		 * Only support src & dst addresses, protocol,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((ipv4_mask->hdr.src_addr != 0 &&
			ipv4_mask->hdr.src_addr != UINT32_MAX) ||
			(ipv4_mask->hdr.dst_addr != 0 &&
			ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
			(ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
			ipv4_mask->hdr.next_proto_id != 0)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto  = ipv4_spec->hdr.next_proto_id;
	}

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
			tcp_mask->hdr.src_port != UINT16_MAX) ||
			(tcp_mask->hdr.dst_port != 0 &&
			tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
		filter->src_port_mask  = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
			udp_mask->hdr.src_port != UINT16_MAX) ||
			(udp_mask->hdr.dst_port != 0 &&
			udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				   attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
		attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

/* a specific function for txgbe because the flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

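/*
 * Illustrative application-side sketch (not part of this driver) of an
 * n-tuple rule following the pattern documented above, here with UDP;
 * port_id and the queue index are hypothetical and must be valid for the
 * application:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = rte_cpu_to_be_32(RTE_IPV4(192, 167, 3, 50)),
 *		.next_proto_id = IPPROTO_UDP,
 *	} };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX,
 *		.dst_addr = UINT32_MAX,
 *		.next_proto_id = UINT8_MAX,
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX,
 *		.dst_port = UINT16_MAX,
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
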
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info as well.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
				attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
		filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

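/*
 * Illustrative application-side sketch (not part of this driver) of an
 * ethertype rule following the pattern documented above, e.g. steering ARP
 * frames (EtherType 0x0806) to queue 0; note that txgbe_parse_ethertype_filter()
 * rejects the MAC-compare and DROP variants, so only a plain QUEUE action
 * on the EtherType is shown:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
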
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info as well.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}

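/*
 * Illustrative application-side sketch (not part of this driver) of a TCP
 * SYN rule following the pattern documented above; the mask covers only the
 * SYN bit, as required by cons_parse_syn_filter():
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = RTE_TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
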
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;
	return flow;
}

/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	return ret;
}

/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret = 0;

	return ret;
}

/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	return ret;
}

const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};
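
/*
 * These ops are reached through the generic rte_flow API. An illustrative
 * application-side sketch (not part of this driver), reusing an attr,
 * pattern and actions array such as the ones sketched above on a
 * hypothetical port_id:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */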