1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2015-2020
7 #include <rte_flow_driver.h>
9 #include "txgbe_ethdev.h"
11 #define TXGBE_MIN_N_TUPLE_PRIO 1
12 #define TXGBE_MAX_N_TUPLE_PRIO 7
15 * Endless loop will never happen with below assumption
16 * 1. there is at least one no-void item(END)
17 * 2. cur is before END.
/*
 * Return the next item in @pattern after @cur whose type is not VOID;
 * when @cur is NULL, start the scan at pattern[0].
 * Termination relies on the assumptions documented above: the pattern
 * always contains an END item and @cur precedes it.
 * NOTE(review): the scan loop's body/return are not visible in this
 * chunk — confirm against the full source.
 */
20 const struct rte_flow_item *next_no_void_pattern(
21 	const struct rte_flow_item pattern[],
22 	const struct rte_flow_item *cur)
/* start one past @cur, or at the head of the array for the first call */
24 	const struct rte_flow_item *next =
25 		cur ? cur + 1 : &pattern[0];
27 		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
/*
 * Action-list counterpart of next_no_void_pattern(): return the next
 * non-VOID action after @cur, or the first non-VOID action when @cur
 * is NULL.  Relies on the list being terminated by an END action.
 */
34 const struct rte_flow_action *next_no_void_action(
35 	const struct rte_flow_action actions[],
36 	const struct rte_flow_action *cur)
/* start one past @cur, or at the head of the array for the first call */
38 	const struct rte_flow_action *next =
39 		cur ? cur + 1 : &actions[0];
41 		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
48 * Please be aware there's an assumption for all the parsers.
49 * rte_flow_item is using big endian, rte_flow_attr and
50 * rte_flow_action are using CPU order.
51 * Because the pattern is used to describe the packets,
52 * normally the packets should use network order.
56 * Parse the rule to see if it is a n-tuple rule.
57 * And get the n-tuple filter info BTW.
59 * The first not void item can be ETH or IPV4.
60 * The second not void item must be IPV4 if the first one is ETH.
61 * The third not void item must be UDP or TCP.
62 * The next not void item must be END.
64 * The first not void action should be QUEUE.
65 * The next not void action should be END.
69 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
70 * dst_addr 192.167.3.50 0xFFFFFFFF
71 * next_proto_id 17 0xFF
72 * UDP/TCP/ src_port 80 0xFFFF
73 * SCTP dst_port 80 0xFFFF
75 * other members in mask and spec should set to 0x00.
76 * item->last should be NULL.
/*
 * Common (device-independent) n-tuple parser: walk @pattern and @actions
 * and fill @filter from the IPv4 + TCP/UDP/SCTP spec/mask pairs.
 * On any violation this reports EINVAL via rte_flow_error_set(); once
 * @filter has been partially populated it is memset to zero first so a
 * failed parse never leaks partial state to the caller.
 *
 * Fix in this revision: repaired HTML-entity mojibake that had turned
 * the "&eth_null" token into "ð_null" in three places.
 */
79 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
80 const struct rte_flow_item pattern[],
81 const struct rte_flow_action actions[],
82 struct rte_eth_ntuple_filter *filter,
83 struct rte_flow_error *error)
85 const struct rte_flow_item *item;
86 const struct rte_flow_action *act;
87 const struct rte_flow_item_ipv4 *ipv4_spec;
88 const struct rte_flow_item_ipv4 *ipv4_mask;
89 const struct rte_flow_item_tcp *tcp_spec;
90 const struct rte_flow_item_tcp *tcp_mask;
91 const struct rte_flow_item_udp *udp_spec;
92 const struct rte_flow_item_udp *udp_mask;
93 const struct rte_flow_item_sctp *sctp_spec;
94 const struct rte_flow_item_sctp *sctp_mask;
95 const struct rte_flow_item_eth *eth_spec;
96 const struct rte_flow_item_eth *eth_mask;
97 const struct rte_flow_item_vlan *vlan_spec;
98 const struct rte_flow_item_vlan *vlan_mask;
99 struct rte_flow_item_eth eth_null;
100 struct rte_flow_item_vlan vlan_null;
/* basic sanity: pattern, actions and attr must all be non-NULL */
103 rte_flow_error_set(error,
104 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
105 NULL, "NULL pattern.");
110 rte_flow_error_set(error, EINVAL,
111 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
112 NULL, "NULL action.");
116 rte_flow_error_set(error, EINVAL,
117 RTE_FLOW_ERROR_TYPE_ATTR,
118 NULL, "NULL attribute.");
/* all-zero templates used to verify ETH/VLAN items carry no content */
122 memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
123 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
125 /* the first not void item can be MAC or IPv4 */
126 item = next_no_void_pattern(pattern, NULL);
128 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
129 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
130 rte_flow_error_set(error, EINVAL,
131 RTE_FLOW_ERROR_TYPE_ITEM,
132 item, "Not supported by ntuple filter");
136 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
137 eth_spec = item->spec;
138 eth_mask = item->mask;
139 /*Not supported last point for range*/
141 rte_flow_error_set(error,
143 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
144 item, "Not supported last point for range");
147 /* if the first item is MAC, the content should be NULL */
148 if ((item->spec || item->mask) &&
149 (memcmp(eth_spec, &eth_null,
150 sizeof(struct rte_flow_item_eth)) ||
151 memcmp(eth_mask, &eth_null,
152 sizeof(struct rte_flow_item_eth)))) {
153 rte_flow_error_set(error, EINVAL,
154 RTE_FLOW_ERROR_TYPE_ITEM,
155 item, "Not supported by ntuple filter");
158 /* check if the next not void item is IPv4 or Vlan */
159 item = next_no_void_pattern(pattern, item);
160 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
161 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
162 rte_flow_error_set(error,
163 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
164 item, "Not supported by ntuple filter");
169 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
170 vlan_spec = item->spec;
171 vlan_mask = item->mask;
172 /*Not supported last point for range*/
174 rte_flow_error_set(error,
175 EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
176 item, "Not supported last point for range");
179 /* the content should be NULL */
180 if ((item->spec || item->mask) &&
181 (memcmp(vlan_spec, &vlan_null,
182 sizeof(struct rte_flow_item_vlan)) ||
183 memcmp(vlan_mask, &vlan_null,
184 sizeof(struct rte_flow_item_vlan)))) {
185 rte_flow_error_set(error, EINVAL,
186 RTE_FLOW_ERROR_TYPE_ITEM,
187 item, "Not supported by ntuple filter");
190 /* check if the next not void item is IPv4 */
191 item = next_no_void_pattern(pattern, item);
192 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
193 rte_flow_error_set(error,
194 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
195 item, "Not supported by ntuple filter");
201 /* get the IPv4 info */
202 if (!item->spec || !item->mask) {
203 rte_flow_error_set(error, EINVAL,
204 RTE_FLOW_ERROR_TYPE_ITEM,
205 item, "Invalid ntuple mask");
208 /*Not supported last point for range*/
210 rte_flow_error_set(error, EINVAL,
211 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
212 item, "Not supported last point for range");
216 ipv4_mask = item->mask;
218 * Only support src & dst addresses, protocol,
219 * others should be masked.
221 if (ipv4_mask->hdr.version_ihl ||
222 ipv4_mask->hdr.type_of_service ||
223 ipv4_mask->hdr.total_length ||
224 ipv4_mask->hdr.packet_id ||
225 ipv4_mask->hdr.fragment_offset ||
226 ipv4_mask->hdr.time_to_live ||
227 ipv4_mask->hdr.hdr_checksum) {
228 rte_flow_error_set(error,
229 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
230 item, "Not supported by ntuple filter");
/* src/dst/proto masks must be all-ones or all-zeros (exact match or wildcard) */
233 if ((ipv4_mask->hdr.src_addr != 0 &&
234 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
235 (ipv4_mask->hdr.dst_addr != 0 &&
236 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
237 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
238 ipv4_mask->hdr.next_proto_id != 0)) {
239 rte_flow_error_set(error,
240 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
241 item, "Not supported by ntuple filter");
/* addresses are kept in the big-endian form the item carries */
245 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
246 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
247 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
249 ipv4_spec = item->spec;
250 filter->dst_ip = ipv4_spec->hdr.dst_addr;
251 filter->src_ip = ipv4_spec->hdr.src_addr;
252 filter->proto = ipv4_spec->hdr.next_proto_id;
255 /* check if the next not void item is TCP or UDP */
256 item = next_no_void_pattern(pattern, item);
257 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
258 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
259 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
260 item->type != RTE_FLOW_ITEM_TYPE_END) {
261 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
262 rte_flow_error_set(error, EINVAL,
263 RTE_FLOW_ERROR_TYPE_ITEM,
264 item, "Not supported by ntuple filter");
/* an L4 item with neither spec nor mask is treated separately below */
268 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
269 (!item->spec && !item->mask)) {
273 /* get the TCP/UDP/SCTP info */
274 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
275 (!item->spec || !item->mask)) {
276 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
277 rte_flow_error_set(error, EINVAL,
278 RTE_FLOW_ERROR_TYPE_ITEM,
279 item, "Invalid ntuple mask");
283 /*Not supported last point for range*/
285 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
286 rte_flow_error_set(error, EINVAL,
287 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
288 item, "Not supported last point for range");
292 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
293 tcp_mask = item->mask;
296 * Only support src & dst ports, tcp flags,
297 * others should be masked.
299 if (tcp_mask->hdr.sent_seq ||
300 tcp_mask->hdr.recv_ack ||
301 tcp_mask->hdr.data_off ||
302 tcp_mask->hdr.rx_win ||
303 tcp_mask->hdr.cksum ||
304 tcp_mask->hdr.tcp_urp) {
306 sizeof(struct rte_eth_ntuple_filter));
307 rte_flow_error_set(error, EINVAL,
308 RTE_FLOW_ERROR_TYPE_ITEM,
309 item, "Not supported by ntuple filter");
/* ports: all-ones or all-zeros only, as for the IPv4 addresses */
312 if ((tcp_mask->hdr.src_port != 0 &&
313 tcp_mask->hdr.src_port != UINT16_MAX) ||
314 (tcp_mask->hdr.dst_port != 0 &&
315 tcp_mask->hdr.dst_port != UINT16_MAX)) {
316 rte_flow_error_set(error,
317 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
318 item, "Not supported by ntuple filter");
322 filter->dst_port_mask = tcp_mask->hdr.dst_port;
323 filter->src_port_mask = tcp_mask->hdr.src_port;
/* tcp_flags mask must be fully on (match flags) or fully off (ignore) */
324 if (tcp_mask->hdr.tcp_flags == 0xFF) {
325 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
326 } else if (!tcp_mask->hdr.tcp_flags) {
327 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
329 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
330 rte_flow_error_set(error, EINVAL,
331 RTE_FLOW_ERROR_TYPE_ITEM,
332 item, "Not supported by ntuple filter");
336 tcp_spec = item->spec;
337 filter->dst_port = tcp_spec->hdr.dst_port;
338 filter->src_port = tcp_spec->hdr.src_port;
339 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
340 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
341 udp_mask = item->mask;
344 * Only support src & dst ports,
345 * others should be masked.
347 if (udp_mask->hdr.dgram_len ||
348 udp_mask->hdr.dgram_cksum) {
350 sizeof(struct rte_eth_ntuple_filter));
351 rte_flow_error_set(error, EINVAL,
352 RTE_FLOW_ERROR_TYPE_ITEM,
353 item, "Not supported by ntuple filter");
356 if ((udp_mask->hdr.src_port != 0 &&
357 udp_mask->hdr.src_port != UINT16_MAX) ||
358 (udp_mask->hdr.dst_port != 0 &&
359 udp_mask->hdr.dst_port != UINT16_MAX)) {
360 rte_flow_error_set(error,
361 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
362 item, "Not supported by ntuple filter");
366 filter->dst_port_mask = udp_mask->hdr.dst_port;
367 filter->src_port_mask = udp_mask->hdr.src_port;
369 udp_spec = item->spec;
370 filter->dst_port = udp_spec->hdr.dst_port;
371 filter->src_port = udp_spec->hdr.src_port;
372 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
373 sctp_mask = item->mask;
376 * Only support src & dst ports,
377 * others should be masked.
379 if (sctp_mask->hdr.tag ||
380 sctp_mask->hdr.cksum) {
382 sizeof(struct rte_eth_ntuple_filter));
383 rte_flow_error_set(error, EINVAL,
384 RTE_FLOW_ERROR_TYPE_ITEM,
385 item, "Not supported by ntuple filter");
389 filter->dst_port_mask = sctp_mask->hdr.dst_port;
390 filter->src_port_mask = sctp_mask->hdr.src_port;
392 sctp_spec = item->spec;
393 filter->dst_port = sctp_spec->hdr.dst_port;
394 filter->src_port = sctp_spec->hdr.src_port;
399 /* check if the next not void item is END */
400 item = next_no_void_pattern(pattern, item);
401 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
402 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
403 rte_flow_error_set(error, EINVAL,
404 RTE_FLOW_ERROR_TYPE_ITEM,
405 item, "Not supported by ntuple filter");
412 * n-tuple only supports forwarding,
413 * check if the first not void action is QUEUE.
415 act = next_no_void_action(actions, NULL);
416 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
417 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
418 rte_flow_error_set(error, EINVAL,
419 RTE_FLOW_ERROR_TYPE_ACTION,
420 item, "Not supported action.");
424 ((const struct rte_flow_action_queue *)act->conf)->index;
426 /* check if the next not void item is END */
427 act = next_no_void_action(actions, act);
428 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
429 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
430 rte_flow_error_set(error, EINVAL,
431 RTE_FLOW_ERROR_TYPE_ACTION,
432 act, "Not supported action.");
437 /* must be input direction */
438 if (!attr->ingress) {
439 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
440 rte_flow_error_set(error, EINVAL,
441 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
442 attr, "Only support ingress.");
448 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
449 rte_flow_error_set(error, EINVAL,
450 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
451 attr, "Not support egress.");
456 if (attr->transfer) {
457 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
458 rte_flow_error_set(error, EINVAL,
459 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
460 attr, "No support for transfer.");
464 if (attr->priority > 0xFFFF) {
465 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
466 rte_flow_error_set(error, EINVAL,
467 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
468 attr, "Error priority.");
/* out-of-range priorities are silently clamped to the default (1) */
471 filter->priority = (uint16_t)attr->priority;
472 if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
473 attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
474 filter->priority = 1;
479 /* a specific function for txgbe because the flags is specific */
/*
 * txgbe-specific n-tuple parser: run the common parser, then reject
 * features the txgbe hardware cannot program (TCP-flag match, priorities
 * outside [TXGBE_MIN_N_TUPLE_PRIO, TXGBE_MAX_N_TUPLE_PRIO], queue index
 * beyond the configured Rx queues), and finally force the txgbe flag set.
 * @filter is zeroed before every error return.
 */
481 txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
482 const struct rte_flow_attr *attr,
483 const struct rte_flow_item pattern[],
484 const struct rte_flow_action actions[],
485 struct rte_eth_ntuple_filter *filter,
486 struct rte_flow_error *error)
/* delegate format validation and field extraction to the common parser */
490 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
495 /* txgbe doesn't support tcp flags */
496 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
497 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
498 rte_flow_error_set(error, EINVAL,
499 RTE_FLOW_ERROR_TYPE_ITEM,
500 NULL, "Not supported by ntuple filter");
504 /* txgbe doesn't support many priorities */
505 if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
506 filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
507 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
508 rte_flow_error_set(error, EINVAL,
509 RTE_FLOW_ERROR_TYPE_ITEM,
510 NULL, "Priority not supported by ntuple filter");
/* queue must exist on this port */
514 if (filter->queue >= dev->data->nb_rx_queues) {
515 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
516 rte_flow_error_set(error, EINVAL,
517 RTE_FLOW_ERROR_TYPE_ITEM,
518 NULL, "Not supported by ntuple filter");
522 /* fixed value for txgbe */
523 filter->flags = RTE_5TUPLE_FLAGS;
528 * Parse the rule to see if it is a ethertype rule.
529 * And get the ethertype filter info BTW.
531 * The first not void item can be ETH.
532 * The next not void item must be END.
534 * The first not void action should be QUEUE.
535 * The next not void action should be END.
538 * ETH type 0x0807 0xFFFF
540 * other members in mask and spec should set to 0x00.
541 * item->last should be NULL.
/*
 * Common ethertype parser: the pattern must be a single ETH item
 * (followed by END) with a full ethertype mask; the action must be a
 * single QUEUE or DROP (followed by END).  Fills @filter and reports
 * EINVAL via rte_flow_error_set() on any violation.
 *
 * Fix in this revision: repaired HTML-entity mojibake that had turned
 * the "&eth_mask" token into "ð_mask" in four places.
 */
544 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
545 const struct rte_flow_item *pattern,
546 const struct rte_flow_action *actions,
547 struct rte_eth_ethertype_filter *filter,
548 struct rte_flow_error *error)
550 const struct rte_flow_item *item;
551 const struct rte_flow_action *act;
552 const struct rte_flow_item_eth *eth_spec;
553 const struct rte_flow_item_eth *eth_mask;
554 const struct rte_flow_action_queue *act_q;
/* basic sanity: pattern, actions and attr must all be non-NULL */
557 rte_flow_error_set(error, EINVAL,
558 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
559 NULL, "NULL pattern.");
564 rte_flow_error_set(error, EINVAL,
565 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
566 NULL, "NULL action.");
571 rte_flow_error_set(error, EINVAL,
572 RTE_FLOW_ERROR_TYPE_ATTR,
573 NULL, "NULL attribute.");
577 item = next_no_void_pattern(pattern, NULL);
578 /* The first non-void item should be MAC. */
579 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
580 rte_flow_error_set(error, EINVAL,
581 RTE_FLOW_ERROR_TYPE_ITEM,
582 item, "Not supported by ethertype filter");
586 /*Not supported last point for range*/
588 rte_flow_error_set(error, EINVAL,
589 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
590 item, "Not supported last point for range");
594 /* Get the MAC info. */
595 if (!item->spec || !item->mask) {
596 rte_flow_error_set(error, EINVAL,
597 RTE_FLOW_ERROR_TYPE_ITEM,
598 item, "Not supported by ethertype filter");
602 eth_spec = item->spec;
603 eth_mask = item->mask;
605 /* Mask bits of source MAC address must be full of 0.
606 * Mask bits of destination MAC address must be full
609 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
610 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
611 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM,
614 item, "Invalid ether address mask");
/* the ethertype itself must be matched exactly */
618 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
619 rte_flow_error_set(error, EINVAL,
620 RTE_FLOW_ERROR_TYPE_ITEM,
621 item, "Invalid ethertype mask");
625 /* If mask bits of destination MAC address
626 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
628 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
629 filter->mac_addr = eth_spec->dst;
630 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
632 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
/* item carries big-endian ethertype; filter stores it in CPU order */
634 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
636 /* Check if the next non-void item is END. */
637 item = next_no_void_pattern(pattern, item);
638 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
639 rte_flow_error_set(error, EINVAL,
640 RTE_FLOW_ERROR_TYPE_ITEM,
641 item, "Not supported by ethertype filter.");
/* first non-void action must be QUEUE or DROP */
647 act = next_no_void_action(actions, NULL);
648 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
649 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
650 rte_flow_error_set(error, EINVAL,
651 RTE_FLOW_ERROR_TYPE_ACTION,
652 act, "Not supported action.");
656 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
657 act_q = (const struct rte_flow_action_queue *)act->conf;
658 filter->queue = act_q->index;
660 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
663 /* Check if the next non-void item is END */
664 act = next_no_void_action(actions, act);
665 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
666 rte_flow_error_set(error, EINVAL,
667 RTE_FLOW_ERROR_TYPE_ACTION,
668 act, "Not supported action.");
673 /* Must be input direction */
674 if (!attr->ingress) {
675 rte_flow_error_set(error, EINVAL,
676 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
677 attr, "Only support ingress.");
683 rte_flow_error_set(error, EINVAL,
684 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
685 attr, "Not support egress.");
690 if (attr->transfer) {
691 rte_flow_error_set(error, EINVAL,
692 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
693 attr, "No support for transfer.");
698 if (attr->priority) {
699 rte_flow_error_set(error, EINVAL,
700 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
701 attr, "Not support priority.");
707 rte_flow_error_set(error, EINVAL,
708 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
709 attr, "Not support group.");
/*
 * txgbe-specific ethertype parser: run the common parser, then reject
 * what the hardware cannot program — out-of-range queue index,
 * IPv4/IPv6 ethertypes (handled by other filter types), MAC-address
 * compare, and the DROP action.  @filter is zeroed before every error
 * return.
 */
717 txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
718 const struct rte_flow_attr *attr,
719 const struct rte_flow_item pattern[],
720 const struct rte_flow_action actions[],
721 struct rte_eth_ethertype_filter *filter,
722 struct rte_flow_error *error)
/* delegate format validation and field extraction to the common parser */
726 ret = cons_parse_ethertype_filter(attr, pattern,
727 actions, filter, error);
732 if (filter->queue >= dev->data->nb_rx_queues) {
733 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
734 rte_flow_error_set(error, EINVAL,
735 RTE_FLOW_ERROR_TYPE_ITEM,
736 NULL, "queue index much too big");
740 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
741 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
742 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
743 rte_flow_error_set(error, EINVAL,
744 RTE_FLOW_ERROR_TYPE_ITEM,
745 NULL, "IPv4/IPv6 not supported by ethertype filter");
749 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
750 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
751 rte_flow_error_set(error, EINVAL,
752 RTE_FLOW_ERROR_TYPE_ITEM,
753 NULL, "mac compare is unsupported");
757 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
758 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
759 rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ITEM,
761 NULL, "drop option is unsupported");
769 * Parse the rule to see if it is a TCP SYN rule.
770 * And get the TCP SYN filter info BTW.
772 * The first not void item must be ETH.
773 * The second not void item must be IPV4 or IPV6.
774 * The third not void item must be TCP.
775 * The next not void item must be END.
777 * The first not void action should be QUEUE.
778 * The next not void action should be END.
782 * IPV4/IPV6 NULL NULL
783 * TCP tcp_flags 0x02 0xFF
785 * other members in mask and spec should set to 0x00.
786 * item->last should be NULL.
/*
 * Common TCP SYN parser: accept an optional ETH item, an optional
 * IPv4/IPv6 item (both must carry no spec/mask), then a TCP item whose
 * mask selects exactly the SYN flag, then END.  The single action must
 * be QUEUE (then END).  Fills @filter and reports EINVAL via
 * rte_flow_error_set() on any violation; @filter is zeroed before
 * error returns once it may have been touched.
 */
789 cons_parse_syn_filter(const struct rte_flow_attr *attr,
790 const struct rte_flow_item pattern[],
791 const struct rte_flow_action actions[],
792 struct rte_eth_syn_filter *filter,
793 struct rte_flow_error *error)
795 const struct rte_flow_item *item;
796 const struct rte_flow_action *act;
797 const struct rte_flow_item_tcp *tcp_spec;
798 const struct rte_flow_item_tcp *tcp_mask;
799 const struct rte_flow_action_queue *act_q;
/* basic sanity: pattern, actions and attr must all be non-NULL */
802 rte_flow_error_set(error, EINVAL,
803 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
804 NULL, "NULL pattern.");
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
811 NULL, "NULL action.");
816 rte_flow_error_set(error, EINVAL,
817 RTE_FLOW_ERROR_TYPE_ATTR,
818 NULL, "NULL attribute.");
823 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
824 item = next_no_void_pattern(pattern, NULL);
825 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
826 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
827 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
828 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ITEM,
831 item, "Not supported by syn filter");
834 /*Not supported last point for range*/
836 rte_flow_error_set(error, EINVAL,
837 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
838 item, "Not supported last point for range");
843 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
844 /* if the item is MAC, the content should be NULL */
845 if (item->spec || item->mask) {
846 rte_flow_error_set(error, EINVAL,
847 RTE_FLOW_ERROR_TYPE_ITEM,
848 item, "Invalid SYN address mask");
852 /* check if the next not void item is IPv4 or IPv6 */
853 item = next_no_void_pattern(pattern, item);
854 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
855 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
856 rte_flow_error_set(error, EINVAL,
857 RTE_FLOW_ERROR_TYPE_ITEM,
858 item, "Not supported by syn filter");
864 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
865 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
866 /* if the item is IP, the content should be NULL */
867 if (item->spec || item->mask) {
868 rte_flow_error_set(error, EINVAL,
869 RTE_FLOW_ERROR_TYPE_ITEM,
870 item, "Invalid SYN mask");
874 /* check if the next not void item is TCP */
875 item = next_no_void_pattern(pattern, item);
876 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
877 rte_flow_error_set(error, EINVAL,
878 RTE_FLOW_ERROR_TYPE_ITEM,
879 item, "Not supported by syn filter");
884 /* Get the TCP info. Only support SYN. */
885 if (!item->spec || !item->mask) {
886 rte_flow_error_set(error, EINVAL,
887 RTE_FLOW_ERROR_TYPE_ITEM,
888 item, "Invalid SYN mask");
891 /*Not supported last point for range*/
893 rte_flow_error_set(error, EINVAL,
894 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
895 item, "Not supported last point for range");
899 tcp_spec = item->spec;
900 tcp_mask = item->mask;
/* spec must have SYN set; mask must match ONLY tcp_flags == SYN */
901 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
902 tcp_mask->hdr.src_port ||
903 tcp_mask->hdr.dst_port ||
904 tcp_mask->hdr.sent_seq ||
905 tcp_mask->hdr.recv_ack ||
906 tcp_mask->hdr.data_off ||
907 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
908 tcp_mask->hdr.rx_win ||
909 tcp_mask->hdr.cksum ||
910 tcp_mask->hdr.tcp_urp) {
911 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
912 rte_flow_error_set(error, EINVAL,
913 RTE_FLOW_ERROR_TYPE_ITEM,
914 item, "Not supported by syn filter");
918 /* check if the next not void item is END */
919 item = next_no_void_pattern(pattern, item);
920 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
921 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
922 rte_flow_error_set(error, EINVAL,
923 RTE_FLOW_ERROR_TYPE_ITEM,
924 item, "Not supported by syn filter");
928 /* check if the first not void action is QUEUE. */
929 act = next_no_void_action(actions, NULL);
930 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
931 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
932 rte_flow_error_set(error, EINVAL,
933 RTE_FLOW_ERROR_TYPE_ACTION,
934 act, "Not supported action.");
/* queue index is bounded by the hardware maximum here, not nb_rx_queues */
938 act_q = (const struct rte_flow_action_queue *)act->conf;
939 filter->queue = act_q->index;
940 if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
941 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942 rte_flow_error_set(error, EINVAL,
943 RTE_FLOW_ERROR_TYPE_ACTION,
944 act, "Not supported action.");
948 /* check if the next not void item is END */
949 act = next_no_void_action(actions, act);
950 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
951 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
952 rte_flow_error_set(error, EINVAL,
953 RTE_FLOW_ERROR_TYPE_ACTION,
954 act, "Not supported action.");
959 /* must be input direction */
960 if (!attr->ingress) {
961 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
962 rte_flow_error_set(error, EINVAL,
963 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
964 attr, "Only support ingress.");
970 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
971 rte_flow_error_set(error, EINVAL,
972 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
973 attr, "Not support egress.");
978 if (attr->transfer) {
979 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
980 rte_flow_error_set(error, EINVAL,
981 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
982 attr, "No support for transfer.");
986 /* Support 2 priorities, the lowest or highest. */
987 if (!attr->priority) {
989 } else if (attr->priority == (uint32_t)~0U) {
992 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
993 rte_flow_error_set(error, EINVAL,
994 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
995 attr, "Not support priority.");
/*
 * txgbe-specific SYN parser: run the common parser, then additionally
 * reject a queue index beyond this port's configured Rx queues.
 */
1003 txgbe_parse_syn_filter(struct rte_eth_dev *dev,
1004 const struct rte_flow_attr *attr,
1005 const struct rte_flow_item pattern[],
1006 const struct rte_flow_action actions[],
1007 struct rte_eth_syn_filter *filter,
1008 struct rte_flow_error *error)
/* delegate format validation and field extraction to the common parser */
1012 ret = cons_parse_syn_filter(attr, pattern,
1013 actions, filter, error);
1015 if (filter->queue >= dev->data->nb_rx_queues)
1025 * Create or destroy a flow rule.
1026 * Theoretically one rule can match more than one filter.
1027 * We will let it use the filter which it hits first.
1028 * So, the sequence matters.
/*
 * rte_flow_ops.create callback: parse @pattern/@actions against the
 * supported filter types in order and program the first match.
 * Returns the created flow handle, or NULL on failure.
 * NOTE(review): the body past the @flow declaration is not visible in
 * this chunk.
 */
1030 static struct rte_flow *
1031 txgbe_flow_create(struct rte_eth_dev *dev,
1032 const struct rte_flow_attr *attr,
1033 const struct rte_flow_item pattern[],
1034 const struct rte_flow_action actions[],
1035 struct rte_flow_error *error)
1037 struct rte_flow *flow = NULL;
1042 * Check if the flow rule is supported by txgbe.
1043 * It only checks the format. Don't guarantee the rule can be programmed into
1044 * the HW. Because there can be no enough room for the rule.
/*
 * rte_flow_ops.validate callback: try each parser in turn (n-tuple,
 * ethertype, SYN) on a throwaway filter struct.  Format-only check —
 * success does not guarantee the rule fits in hardware.
 *
 * Fix in this revision: repaired HTML-entity mojibake that had turned
 * the "&ethertype_filter" token into "ðertype_filter" in two places.
 */
1047 txgbe_flow_validate(struct rte_eth_dev *dev,
1048 const struct rte_flow_attr *attr,
1049 const struct rte_flow_item pattern[],
1050 const struct rte_flow_action actions[],
1051 struct rte_flow_error *error)
1053 struct rte_eth_ntuple_filter ntuple_filter;
1054 struct rte_eth_ethertype_filter ethertype_filter;
1055 struct rte_eth_syn_filter syn_filter;
/* each candidate filter struct is zeroed before its parser runs */
1058 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
1059 ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
1060 actions, &ntuple_filter, error);
1064 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
1065 ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
1066 actions, &ethertype_filter, error);
1070 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
1071 ret = txgbe_parse_syn_filter(dev, attr, pattern,
1072 actions, &syn_filter, error);
1079 /* Destroy a flow rule on txgbe. */
/*
 * rte_flow_ops.destroy callback.
 * NOTE(review): the function body is not visible in this chunk.
 */
1081 txgbe_flow_destroy(struct rte_eth_dev *dev,
1082 struct rte_flow *flow,
1083 struct rte_flow_error *error)
1090 /* Destroy all flow rules associated with a port on txgbe. */
/*
 * rte_flow_ops.flush callback.
 * NOTE(review): the function body is not visible in this chunk.
 */
1092 txgbe_flow_flush(struct rte_eth_dev *dev,
1093 struct rte_flow_error *error)
/*
 * Generic-flow driver ops table registered with the rte_flow layer;
 * wires the four callbacks defined above to the rte_flow API.
 */
1100 const struct rte_flow_ops txgbe_flow_ops = {
1101 	.validate = txgbe_flow_validate,
1102 	.create = txgbe_flow_create,
1103 	.destroy = txgbe_flow_destroy,
1104 	.flush = txgbe_flow_flush,