/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern,
		const struct rte_flow_action *actions,
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *filter,
		struct rte_flow_error *error);
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *rule,
		struct rte_flow_error *error);
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);

const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.flush = ixgbe_flow_flush,
};

#define IXGBE_MIN_N_TUPLE_PRIO	1
#define IXGBE_MAX_N_TUPLE_PRIO	7
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)
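
/*
 * Usage sketch for the macros above (illustrative only, not driver code;
 * the local names are hypothetical). Each invocation leaves "item"
 * pointing at the first non-VOID entry at or after "index"; the caller
 * bumps "index" before asking for the next item:
 *
 *	const struct rte_flow_item *item;
 *	uint32_t index = 0;
 *
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);	// first real item
 *	index++;
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);	// next real item
 */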

/**
 * Please be aware of an assumption shared by all the parsers below:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order. This is because the pattern
 * describes packets, and packets are normally in network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule, and extract
 * the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * ETH		NULL				NULL
 * IPV4		src_addr 192.168.1.20		0xFFFFFFFF
 *		dst_addr 192.167.3.50		0xFFFFFFFF
 *		next_proto_id	17		0xFF
 * UDP/TCP	src_port	80		0xFFFF
 *		dst_port	80		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only src & dst addresses and protocol are supported;
	 * the other fields should be fully masked out.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only src & dst ports and TCP flags are supported;
		 * the other fields should be fully masked out.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only src & dst ports are supported;
		 * the other fields should be fully masked out.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

/* An ixgbe-specific wrapper, because the supported flags are specific. */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* ixgbe doesn't support TCP flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;

	return 0;
}
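
/*
 * A minimal application-side sketch (illustrative only, hypothetical
 * variable names) of a pattern/action set this parser accepts, mirroring
 * the "pattern example" above. IPv4() comes from rte_ip.h and IPPROTO_UDP
 * from netinet/in.h:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = UINT8_MAX,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = UINT16_MAX,
 *			.dst_port = UINT16_MAX,
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */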

/**
 * Parse the rule to see if it is an ethertype rule, and extract
 * the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE or DROP.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * ETH		type	0x0807			0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* ixgbe doesn't support MAC address matching. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule, and extract
 * the TCP SYN filter info along the way.
 * pattern:
 * The first not void item can be ETH, IPV4, IPV6 or TCP.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * ETH		NULL				NULL
 * IPV4/IPV6	NULL				NULL
 * TCP		tcp_flags	0x02		0xFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);

	if (ret)
		return ret;

	return 0;
}
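
/*
 * A minimal application-side sketch (illustrative only, hypothetical
 * variable names) of a TCP SYN rule this parser accepts. Only the SYN
 * bit (0x02) is specified, and only that bit is masked:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = 0x02 },	// SYN
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = 0x02 },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */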

/**
 * Parse the rule to see if it is an L2 tunnel rule, and extract
 * the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec				Mask
 * E_TAG	grp		0x2		0x3
 *		e_cid_base	0x309		0xFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * The e-tag id is taken as little endian by the HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
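
/*
 * Illustrative only: an application reaches this handler through the
 * generic rte_flow API (hypothetical names; port_id must identify an
 * ixgbe port):
 *
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (ret)
 *		printf("rule rejected: %s\n",
 *		       err.message ? err.message : "(no message)");
 */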

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to flush rule");
		return ret;
	}

	return 0;
}