/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2015-2020
 */

#include <rte_bus_pci.h>
#include <rte_flow_driver.h>

#include "txgbe_ethdev.h"

#define TXGBE_MIN_N_TUPLE_PRIO 1
#define TXGBE_MAX_N_TUPLE_PRIO 7
/**
 * Endless loop will never happen with below assumption
 * 1. there is at least one no-void item(END)
 * 2. cur is before END.
 */
static inline const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
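/*
 * For instance, a spec matching TCP destination port 80 is expected to
 * already carry the value in network order (illustrative snippet, not
 * driver code):
 *
 *	struct rte_flow_item_tcp tcp_spec = { 0 };
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *
 * while attributes such as attr->priority stay in CPU order.
 */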
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP, TCP or SCTP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
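/*
 * An illustrative testpmd command that matches the layout above
 * (port id 0 and queue index 3 are placeholders, not from this file):
 *
 * flow create 0 ingress priority 1 pattern eth / ipv4 src is 192.168.1.20
 *	dst is 192.167.3.50 proto is 17 / udp src is 80 dst is 80 / end
 *	actions queue index 3 / end
 */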
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;
	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or Vlan */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	if (item->mask) {
		/* get the IPv4 info */
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ntuple mask");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		ipv4_mask = item->mask;
		/**
		 * Only support src & dst addresses, protocol,
		 * others should be masked.
		 */
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.hdr_checksum) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((ipv4_mask->hdr.src_addr != 0 &&
		     ipv4_mask->hdr.src_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.dst_addr != 0 &&
		     ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
		    (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
		     ipv4_mask->hdr.next_proto_id != 0)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
		filter->src_ip_mask = ipv4_mask->hdr.src_addr;
		filter->proto_mask = ipv4_mask->hdr.next_proto_id;

		ipv4_spec = item->spec;
		filter->dst_ip = ipv4_spec->hdr.dst_addr;
		filter->src_ip = ipv4_spec->hdr.src_addr;
		filter->proto = ipv4_spec->hdr.next_proto_id;
	}
	/* check if the next not void item is TCP, UDP, SCTP or END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec && !item->mask)) {
		goto action;
	}
	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((tcp_mask->hdr.src_port != 0 &&
		     tcp_mask->hdr.src_port != UINT16_MAX) ||
		    (tcp_mask->hdr.dst_port != 0 &&
		     tcp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		if ((udp_mask->hdr.src_port != 0 &&
		     udp_mask->hdr.src_port != UINT16_MAX) ||
		    (udp_mask->hdr.dst_port != 0 &&
		     udp_mask->hdr.dst_port != UINT16_MAX)) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;
	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}
	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > TXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for txgbe because the flags are specific */
static int
txgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;
	/* txgbe doesn't support tcp flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* txgbe doesn't support many priorities */
	if (filter->priority < TXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > TXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* fixed value for txgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
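/*
 * An illustrative testpmd command for this layout (port id and queue
 * index are placeholders, not from this file):
 *
 * flow create 0 ingress pattern eth type is 0x0807 / end
 *	actions queue index 2 / end
 */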
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!rte_is_zero_ether_addr(&eth_mask->src) ||
	    (!rte_is_zero_ether_addr(&eth_mask->dst) &&
	     !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}
	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}
	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;
	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
	    filter->ether_type == RTE_ETHER_TYPE_IPV6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0x02
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
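/*
 * An illustrative testpmd command for a SYN rule (port id and queue
 * index are placeholders; the mask must cover exactly the SYN bit):
 *
 * flow create 0 ingress pattern eth / ipv4 / tcp flags spec 0x02
 *	flags mask 0x02 / end actions queue index 3 / end
 */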
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= TXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}
	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * actions:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
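/*
 * An illustrative testpmd command (grp 0x1 and e_cid_base 0x309 pack
 * into the 14-bit grp_ecid_b field as 0x1309; port id and VF id are
 * placeholders):
 *
 * flow create 0 ingress pattern e_tag grp_ecid_b is 0x1309 / end
 *	actions vf id 1 / end
 */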
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct txgbe_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
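	/*
	 * For example, grp 0x1 with e_cid_base 0x309 arrives as
	 * rsvd_grp_ecid_b == rte_cpu_to_be_16(0x1309); after the swap
	 * above, tunnel_id holds 0x1309 (grp in bits 13:12, e_cid_base
	 * in bits 11:0).
	 */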
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->transfer) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}
	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
txgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct txgbe_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				      actions, l2_tn_filter, error);

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
txgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct rte_flow *flow = NULL;

	return flow;
}
/**
 * Check if the flow rule is supported by txgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
txgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct txgbe_l2_tunnel_conf l2_tn_filter;
	int ret = 0;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = txgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = txgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = txgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct txgbe_l2_tunnel_conf));
	ret = txgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
/* Destroy a flow rule on txgbe. */
static int
txgbe_flow_destroy(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	int ret = 0;

	return ret;
}

/* Destroy all flow rules associated with a port on txgbe. */
static int
txgbe_flow_flush(struct rte_eth_dev *dev,
		 struct rte_flow_error *error)
{
	int ret = 0;

	return ret;
}

const struct rte_flow_ops txgbe_flow_ops = {
	.validate = txgbe_flow_validate,
	.create = txgbe_flow_create,
	.destroy = txgbe_flow_destroy,
	.flush = txgbe_flow_flush,
};
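/*
 * Illustrative application-side usage (not part of this driver): the ops
 * above are reached through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * where port_id, attr, pattern and actions are application-provided.
 */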