/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)    \
    do {                                              \
        item = pattern + index;                       \
        while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
            index++;                                  \
            item = pattern + index;                   \
        }                                             \
    } while (0)
#define NEXT_ITEM_OF_ACTION(act, actions, index)      \
    do {                                              \
        act = actions + index;                        \
        while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
            index++;                                  \
            act = actions + index;                    \
        }                                             \
    } while (0)
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
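
/*
 * A minimal illustration of that convention (not driver code, kept out of
 * the build): item fields are written in network order, attributes stay in
 * CPU order. The function name and all values are made-up examples.
 */
#if 0
static void
ixgbe_flow_endianness_example(void)
{
    struct rte_flow_item_ipv4 ip_spec;
    struct rte_flow_attr attr;

    memset(&ip_spec, 0, sizeof(ip_spec));
    memset(&attr, 0, sizeof(attr));
    /* Packet bytes: big endian. */
    ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
    /* Attributes: CPU order, no conversion. */
    attr.ingress = 1;
    attr.priority = 1;
}
#endif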
/**
 * Parse the rule to see if it is an n-tuple rule.
 * Also get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP      src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ntuple_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    uint32_t index;

    if (!pattern) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* parse pattern */
    index = 0;

    /* the first not void item can be MAC or IPv4 */
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* if the first item is MAC, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
        /* check if the next not void item is IPv4 */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
            rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
    }

    /* get the IPv4 info */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }
    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
    /**
     * Only support src & dst addresses, protocol,
     * others should be masked.
     */
    if (ipv4_mask->hdr.version_ihl ||
        ipv4_mask->hdr.type_of_service ||
        ipv4_mask->hdr.total_length ||
        ipv4_mask->hdr.packet_id ||
        ipv4_mask->hdr.fragment_offset ||
        ipv4_mask->hdr.time_to_live ||
        ipv4_mask->hdr.hdr_checksum) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
    filter->src_ip_mask = ipv4_mask->hdr.src_addr;
    filter->proto_mask = ipv4_mask->hdr.next_proto_id;

    ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
    filter->dst_ip = ipv4_spec->hdr.dst_addr;
    filter->src_ip = ipv4_spec->hdr.src_addr;
    filter->proto = ipv4_spec->hdr.next_proto_id;

    /* check if the next not void item is TCP or UDP */
    index++;
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* get the TCP/UDP info */
    if (!item->spec || !item->mask) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

        /**
         * Only support src & dst ports, tcp flags,
         * others should be masked.
         */
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = tcp_mask->hdr.dst_port;
        filter->src_port_mask = tcp_mask->hdr.src_port;
        if (tcp_mask->hdr.tcp_flags == 0xFF) {
            filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else if (!tcp_mask->hdr.tcp_flags) {
            filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else {
            memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        filter->dst_port = tcp_spec->hdr.dst_port;
        filter->src_port = tcp_spec->hdr.src_port;
        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
    } else {
        udp_mask = (const struct rte_flow_item_udp *)item->mask;

        /**
         * Only support src & dst ports,
         * others should be masked.
         */
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = udp_mask->hdr.dst_port;
        filter->src_port_mask = udp_mask->hdr.src_port;

        udp_spec = (const struct rte_flow_item_udp *)item->spec;
        filter->dst_port = udp_spec->hdr.dst_port;
        filter->src_port = udp_spec->hdr.src_port;
    }

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* parse action */
    index = 0;

    /**
     * n-tuple only supports forwarding,
     * check if the first not void action is QUEUE.
     */
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }
    filter->queue =
        ((const struct rte_flow_action_queue *)act->conf)->index;

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    if (attr->priority > 0xFFFF) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Error priority.");
        return -rte_errno;
    }
    filter->priority = (uint16_t)attr->priority;
    if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
        filter->priority = 1;

    return 0;
}
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ntuple_filter *filter,
            struct rte_flow_error *error)
{
    int ret;

    ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
    if (ret)
        return ret;

    /* ixgbe doesn't support tcp flags */
    if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* ixgbe doesn't support many priorities */
    if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Priority not supported by ntuple filter");
        return -rte_errno;
    }

    if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
        filter->priority > IXGBE_5TUPLE_MAX_PRI ||
        filter->priority < IXGBE_5TUPLE_MIN_PRI)
        return -rte_errno;

    /* fixed value for ixgbe */
    filter->flags = RTE_5TUPLE_FLAGS;
    return 0;
}
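
/*
 * Usage sketch, kept out of the build: an application-side rule of the
 * documented n-tuple shape that the two parsers above accept. The function
 * name, port id, queue index, addresses and ports are arbitrary examples.
 */
#if 0
static int
ixgbe_flow_ntuple_example(uint8_t port_id)
{
    struct rte_flow_attr attr;
    struct rte_flow_item_ipv4 ip_spec, ip_mask;
    struct rte_flow_item_udp udp_spec, udp_mask;
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_item pattern[4];
    struct rte_flow_action actions[2];
    struct rte_flow_error err;

    memset(&attr, 0, sizeof(attr));
    attr.ingress = 1;
    attr.priority = 1; /* within IXGBE_MIN/MAX_N_TUPLE_PRIO */

    memset(&ip_spec, 0, sizeof(ip_spec));
    memset(&ip_mask, 0, sizeof(ip_mask));
    ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
    ip_mask.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX);
    ip_spec.hdr.next_proto_id = IPPROTO_UDP;
    ip_mask.hdr.next_proto_id = UINT8_MAX;

    memset(&udp_spec, 0, sizeof(udp_spec));
    memset(&udp_mask, 0, sizeof(udp_mask));
    udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
    udp_mask.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX);

    memset(pattern, 0, sizeof(pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; /* no spec/mask: wildcard */
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
    pattern[1].spec = &ip_spec;
    pattern[1].mask = &ip_mask;
    pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
    pattern[2].spec = &udp_spec;
    pattern[2].mask = &udp_mask;
    pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

    memset(actions, 0, sizeof(actions));
    actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    actions[0].conf = &queue;
    actions[1].type = RTE_FLOW_ACTION_TYPE_END;

    return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif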
/**
 * Parse the rule to see if it is an ethertype rule.
 * Also get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item *pattern,
            const struct rte_flow_action *actions,
            struct rte_eth_ethertype_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_action_queue *act_q;
    uint32_t index;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* parse pattern */
    index = 0;

    /* The first non-void item should be MAC. */
    item = pattern + index;
    while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
        index++;
        item = pattern + index;
    }
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Get the MAC info. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    eth_spec = (const struct rte_flow_item_eth *)item->spec;
    eth_mask = (const struct rte_flow_item_eth *)item->mask;

    /* Mask bits of source MAC address must be full of 0.
     * Mask bits of destination MAC address must be full
     * of 1 or full of 0.
     */
    if (!is_zero_ether_addr(&eth_mask->src) ||
        (!is_zero_ether_addr(&eth_mask->dst) &&
         !is_broadcast_ether_addr(&eth_mask->dst))) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ether address mask");
        return -rte_errno;
    }

    if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ethertype mask");
        return -rte_errno;
    }

    /* If mask bits of destination MAC address
     * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
     */
    if (is_broadcast_ether_addr(&eth_mask->dst)) {
        filter->mac_addr = eth_spec->dst;
        filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
    } else {
        filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
    }
    filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

    /* Check if the next non-void item is END. */
    index++;
    item = pattern + index;
    while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
        index++;
        item = pattern + index;
    }
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter.");
        return -rte_errno;
    }

    /* parse action */
    index = 0;

    /* Check if the first non-void action is QUEUE or DROP. */
    act = actions + index;
    while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
        index++;
        act = actions + index;
    }
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;
    } else {
        filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
    }

    /* Check if the next non-void item is END */
    index++;
    act = actions + index;
    while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
        index++;
        act = actions + index;
    }
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* parse attr */
    /* Must be input direction */
    if (!attr->ingress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->group) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
            attr, "Not support group.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ethertype_filter *filter,
            struct rte_flow_error *error)
{
    int ret;

    ret = cons_parse_ethertype_filter(attr, pattern,
                actions, filter, error);
    if (ret)
        return ret;

    /* ixgbe doesn't support MAC address. */
    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "queue index much too big");
        return -rte_errno;
    }

    if (filter->ether_type == ETHER_TYPE_IPv4 ||
        filter->ether_type == ETHER_TYPE_IPv6) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "IPv4/IPv6 not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "mac compare is unsupported");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "drop option is unsupported");
        return -rte_errno;
    }

    return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * Also get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_syn_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_action_queue *act_q;
    uint32_t index;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* parse pattern */
    index = 0;

    /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* if the item is MAC, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN address mask");
            return -rte_errno;
        }

        /* check if the next not void item is IPv4 or IPv6 */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
        item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /* if the item is IP, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN mask");
            return -rte_errno;
        }

        /* check if the next not void item is TCP */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    /* Get the TCP info. Only support SYN. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid SYN mask");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
    tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
    if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
        tcp_mask->hdr.src_port ||
        tcp_mask->hdr.dst_port ||
        tcp_mask->hdr.sent_seq ||
        tcp_mask->hdr.recv_ack ||
        tcp_mask->hdr.data_off ||
        tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
        tcp_mask->hdr.rx_win ||
        tcp_mask->hdr.cksum ||
        tcp_mask->hdr.tcp_urp) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* parse action */
    index = 0;

    /* check if the first not void action is QUEUE. */
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    act_q = (const struct rte_flow_action_queue *)act->conf;
    filter->queue = act_q->index;
    if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* Support 2 priorities, the lowest or highest. */
    if (!attr->priority) {
        filter->hig_pri = 0;
    } else if (attr->priority == (uint32_t)~0U) {
        filter->hig_pri = 1;
    } else {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_syn_filter *filter,
            struct rte_flow_error *error)
{
    int ret;

    ret = cons_parse_syn_filter(attr, pattern,
                actions, filter, error);
    if (ret)
        return ret;

    return 0;
}
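
/*
 * Usage sketch, kept out of the build: a SYN rule of the documented shape.
 * Function name, port id and queue index are arbitrary examples; 0x02 is
 * the TCP SYN flag bit the parser requires in both spec and mask.
 */
#if 0
static int
ixgbe_flow_syn_example(uint8_t port_id)
{
    struct rte_flow_attr attr;
    struct rte_flow_item_tcp tcp_spec, tcp_mask;
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_item pattern[4];
    struct rte_flow_action actions[2];
    struct rte_flow_error err;

    memset(&attr, 0, sizeof(attr));
    attr.ingress = 1;
    attr.priority = 0; /* must be 0 (lowest) or UINT32_MAX (highest) */

    memset(&tcp_spec, 0, sizeof(tcp_spec));
    memset(&tcp_mask, 0, sizeof(tcp_mask));
    tcp_spec.hdr.tcp_flags = 0x02; /* TCP_SYN_FLAG */
    tcp_mask.hdr.tcp_flags = 0x02; /* mask must be exactly the SYN bit */

    memset(pattern, 0, sizeof(pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;  /* no spec/mask */
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4; /* no spec/mask */
    pattern[2].type = RTE_FLOW_ITEM_TYPE_TCP;
    pattern[2].spec = &tcp_spec;
    pattern[2].mask = &tcp_mask;
    pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

    memset(actions, 0, sizeof(actions));
    actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    actions[0].conf = &queue;
    actions[1].type = RTE_FLOW_ACTION_TYPE_END;

    return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif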
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * Also get the L2 tunnel filter info along the way.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x1     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_l2_tunnel_conf *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_e_tag *e_tag_spec;
    const struct rte_flow_item_e_tag *e_tag_mask;
    const struct rte_flow_action *act;
    const struct rte_flow_action_queue *act_q;
    uint32_t index;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* parse pattern */
    index = 0;

    /* The first not void item should be e-tag. */
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    if (!item->spec || !item->mask) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
    e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

    /* Only care about GRP and E cid base. */
    if (e_tag_mask->epcp_edei_in_ecid_b ||
        e_tag_mask->in_ecid_e ||
        e_tag_mask->ecid_e ||
        e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
    /**
     * grp and e_cid_base are bit fields and only use 14 bits.
     * e-tag id is taken as little endian by HW.
     */
    filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* parse action */
    index = 0;

    /* check if the first not void action is QUEUE. */
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    act_q = (const struct rte_flow_action_queue *)act->conf;
    filter->pool = act_q->index;

    /* check if the next not void item is END */
    index++;
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_l2_tunnel_conf *l2_tn_filter,
            struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    ret = cons_parse_l2_tn_filter(attr, pattern,
                actions, l2_tn_filter, error);

    if (hw->mac.type != ixgbe_mac_X550 &&
        hw->mac.type != ixgbe_mac_X550EM_x &&
        hw->mac.type != ixgbe_mac_X550EM_a) {
        memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    return ret;
}
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    const struct rte_flow_action *act;
    const struct rte_flow_action_queue *act_q;
    const struct rte_flow_action_mark *mark;
    uint32_t index;

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* parse action */
    index = 0;

    /* check if the first not void action is QUEUE or DROP. */
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        rule->queue = act_q->index;
    } else {
        rule->fdirflags = IXGBE_FDIRCMD_DROP;
    }

    /* check if the next not void item is MARK */
    index++;
    NEXT_ITEM_OF_ACTION(act, actions, index);
    if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
        (act->type != RTE_FLOW_ACTION_TYPE_END)) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
        mark = (const struct rte_flow_action_mark *)act->conf;
        rule->soft_id = mark->id;
        index++;
        NEXT_ITEM_OF_ACTION(act, actions, index);
    }

    /* check if the next not void item is END */
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * Also get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 * UDP/TCP/SCTP src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          dst_addr
 *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 *              tpid    0x8100          0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec;
    const struct rte_flow_item_sctp *sctp_mask;
    const struct rte_flow_item_vlan *vlan_spec;
    const struct rte_flow_item_vlan *vlan_mask;
    uint32_t index, j;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /**
     * Some fields may not be provided. Set spec to 0 and mask to default
     * value, so we need not do anything for the fields not provided later.
     */
    memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
    memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
    rule->mask.vlan_tci_mask = 0;

    /* parse pattern */
    index = 0;

    /**
     * The first not void item should be
     * MAC or IPv4 or TCP or UDP or SCTP.
     */
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    rule->mode = RTE_FDIR_MODE_PERFECT;

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Get the MAC info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /**
         * Only support vlan and dst MAC address,
         * others should be masked.
         */
        if (item->spec && !item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        if (item->spec) {
            rule->b_spec = TRUE;
            eth_spec = (const struct rte_flow_item_eth *)item->spec;

            /* Get the dst MAC. */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                rule->ixgbe_fdir.formatted.inner_mac[j] =
                    eth_spec->dst.addr_bytes[j];
            }
        }

        if (item->mask) {
            /* If ethernet has meaning, it means MAC VLAN mode. */
            rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

            rule->b_mask = TRUE;
            eth_mask = (const struct rte_flow_item_eth *)item->mask;

            /* Ether type should be masked. */
            if (eth_mask->type) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }

            /**
             * src MAC address must be masked,
             * and dst MAC address mask is not supported.
             */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                if (eth_mask->src.addr_bytes[j] ||
                    eth_mask->dst.addr_bytes[j] != 0xFF) {
                    memset(rule, 0,
                        sizeof(struct ixgbe_fdir_rule));
                    rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter");
                    return -rte_errno;
                }
            }

            /* When no VLAN, considered as full mask. */
            rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
        }
        /**
         * If both spec and mask are NULL,
         * it means don't care about ETH.
         */

        /**
         * Check if the next not void item is vlan or ipv4.
         * IPv6 is not supported.
         */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
            if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        } else {
            if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        if (!(item->spec && item->mask)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
        vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

        if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

        if (vlan_mask->tpid != (uint16_t)~0U) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.vlan_tci_mask = vlan_mask->tci;
        rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
        /* More than one tag is not supported. */

        /**
         * Check if the next not void item is not vlan.
         */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the IP info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_IPV4;

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /**
         * Only care about src & dst addresses,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        ipv4_mask =
            (const struct rte_flow_item_ipv4 *)item->mask;
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.next_proto_id ||
            ipv4_mask->hdr.hdr_checksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
        rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

        if (item->spec) {
            rule->b_spec = TRUE;
            ipv4_spec =
                (const struct rte_flow_item_ipv4 *)item->spec;
            rule->ixgbe_fdir.formatted.dst_ip[0] =
                ipv4_spec->hdr.dst_addr;
            rule->ixgbe_fdir.formatted.src_ip[0] =
                ipv4_spec->hdr.src_addr;
        }

        /**
         * Check if the next not void item is
         * TCP or UDP or SCTP or END.
         */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the TCP info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_TCPV4;

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /**
         * Only care about src & dst ports,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                tcp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                tcp_spec->hdr.dst_port;
        }
    }

    /* Get the UDP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_UDPV4;

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /**
         * Only care about src & dst ports,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        udp_mask = (const struct rte_flow_item_udp *)item->mask;
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = udp_mask->hdr.src_port;
        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            udp_spec = (const struct rte_flow_item_udp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                udp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                udp_spec->hdr.dst_port;
        }
    }

    /* Get the SCTP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_SCTPV4;

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /**
         * Only care about src & dst ports,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        sctp_mask =
            (const struct rte_flow_item_sctp *)item->mask;
        if (sctp_mask->hdr.tag ||
            sctp_mask->hdr.cksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = sctp_mask->hdr.src_port;
        rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            sctp_spec =
                (const struct rte_flow_item_sctp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                sctp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                sctp_spec->hdr.dst_port;
        }
    }

    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        /* check if the next not void item is END */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
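
/*
 * Usage sketch, kept out of the build: a perfect-mode flow director rule of
 * the documented UDP shape, with the optional MARK action. Function name and
 * all values are arbitrary examples. Unlike the ntuple parser, the IPv4 mask
 * here may only cover src/dst addresses; next_proto_id must stay unmasked.
 */
#if 0
static int
ixgbe_flow_fdir_udp_example(uint8_t port_id)
{
    struct rte_flow_attr attr;
    struct rte_flow_item_ipv4 ip_spec, ip_mask;
    struct rte_flow_item_udp udp_spec, udp_mask;
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action_mark mark = { .id = 0x1234 };
    struct rte_flow_item pattern[4];
    struct rte_flow_action actions[3];
    struct rte_flow_error err;

    memset(&attr, 0, sizeof(attr));
    attr.ingress = 1; /* priority must stay 0 */

    memset(&ip_spec, 0, sizeof(ip_spec));
    memset(&ip_mask, 0, sizeof(ip_mask));
    ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
    ip_mask.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX);
    ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
    ip_mask.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX);

    memset(&udp_spec, 0, sizeof(udp_spec));
    memset(&udp_mask, 0, sizeof(udp_mask));
    udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
    udp_mask.hdr.src_port = rte_cpu_to_be_16(UINT16_MAX);
    udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
    udp_mask.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX);

    memset(pattern, 0, sizeof(pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH; /* no spec/mask */
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
    pattern[1].spec = &ip_spec;
    pattern[1].mask = &ip_mask;
    pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
    pattern[2].spec = &udp_spec;
    pattern[2].mask = &udp_mask;
    pattern[3].type = RTE_FLOW_ITEM_TYPE_END;

    memset(actions, 0, sizeof(actions));
    actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
    actions[0].conf = &queue;
    actions[1].type = RTE_FLOW_ACTION_TYPE_MARK;
    actions[1].conf = &mark;
    actions[2].type = RTE_FLOW_ACTION_TYPE_END;

    return rte_flow_validate(port_id, &attr, pattern, actions, &err);
}
#endif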
#define NVGRE_PROTOCOL 0x6558
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * Also get the flow director filter info along the way.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 / IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 / IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * UDP          NULL                    NULL
 * VxLAN        vni {0x00, 0x32, 0x54}  {0xFF, 0xFF, 0xFF}
 * END
 * NVGRE pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * NVGRE        protocol        0x6558  0xFFFF
 *              tni {0x00, 0x32, 0x54}  {0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_vxlan *vxlan_spec;
    const struct rte_flow_item_vxlan *vxlan_mask;
    const struct rte_flow_item_nvgre *nvgre_spec;
    const struct rte_flow_item_nvgre *nvgre_mask;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_vlan *vlan_spec;
    const struct rte_flow_item_vlan *vlan_mask;
    uint32_t index, j;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /**
     * Some fields may not be provided. Set spec to 0 and mask to default
     * value, so we need not do anything for the fields not provided later.
     */
    memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
    memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
    rule->mask.vlan_tci_mask = 0;

    /* parse pattern */
    index = 0;

    /**
     * The first not void item should be
     * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
     */
    NEXT_ITEM_OF_PATTERN(item, pattern, index);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
        item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not void item is IPv4 or IPv6. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
        item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not void item is UDP or NVGRE. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not void item is VxLAN. */
        index++;
        NEXT_ITEM_OF_PATTERN(item, pattern, index);
        if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the VxLAN info */
    if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
        rule->ixgbe_fdir.formatted.tunnel_type =
            RTE_FDIR_TUNNEL_TYPE_VXLAN;

        /* Only care about VNI, others should be masked. */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        rule->b_mask = TRUE;

        /* Tunnel type is always meaningful. */
        rule->mask.tunnel_type_mask = 1;

        vxlan_mask =
            (const struct rte_flow_item_vxlan *)item->mask;
        if (vxlan_mask->flags) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* VNI must be totally masked or not. */
        if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
             vxlan_mask->vni[2]) &&
            ((vxlan_mask->vni[0] != 0xFF) ||
             (vxlan_mask->vni[1] != 0xFF) ||
             (vxlan_mask->vni[2] != 0xFF))) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
            RTE_DIM(vxlan_mask->vni));

        if (item->spec) {
            rule->b_spec = TRUE;
            vxlan_spec = (const struct rte_flow_item_vxlan *)
                    item->spec;
            rte_memcpy(((uint8_t *)
                &rule->ixgbe_fdir.formatted.tni_vni + 1),
                vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
            rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
                rule->ixgbe_fdir.formatted.tni_vni);
        }
    }

    /* Get the NVGRE info */
    if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
        rule->ixgbe_fdir.formatted.tunnel_type =
            RTE_FDIR_TUNNEL_TYPE_NVGRE;

        /**
         * Only care about flags0, flags1, protocol and TNI,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        rule->b_mask = TRUE;

        /* Tunnel type is always meaningful. */
        rule->mask.tunnel_type_mask = 1;

        nvgre_mask =
            (const struct rte_flow_item_nvgre *)item->mask;
        if (nvgre_mask->flow_id) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        if (nvgre_mask->c_k_s_rsvd0_ver !=
            rte_cpu_to_be_16(0x3000) ||
            nvgre_mask->protocol != 0xFFFF) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* TNI must be totally masked or not. */
        if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
             nvgre_mask->tni[2]) &&
            ((nvgre_mask->tni[0] != 0xFF) ||
             (nvgre_mask->tni[1] != 0xFF) ||
             (nvgre_mask->tni[2] != 0xFF))) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* TNI is a 24-bit field. */
        rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
            RTE_DIM(nvgre_mask->tni));
        rule->mask.tunnel_id_mask <<= 8;

        if (item->spec) {
            rule->b_spec = TRUE;
            nvgre_spec =
                (const struct rte_flow_item_nvgre *)item->spec;
            if (nvgre_spec->c_k_s_rsvd0_ver !=
                rte_cpu_to_be_16(0x2000) ||
                nvgre_spec->protocol !=
                rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            /* TNI is a 24-bit field. */
            rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
                nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
            rule->ixgbe_fdir.formatted.tni_vni <<= 8;
        }
    }
2134 /* check if the next not void item is MAC */
2136 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2137 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2138 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2139 rte_flow_error_set(error, EINVAL,
2140 RTE_FLOW_ERROR_TYPE_ITEM,
2141 item, "Not supported by fdir filter");
2146 * Only support vlan and dst MAC address,
2147 * others should be masked.
2151 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2152 rte_flow_error_set(error, EINVAL,
2153 RTE_FLOW_ERROR_TYPE_ITEM,
2154 item, "Not supported by fdir filter");
2157 /*Not supported last point for range*/
2159 rte_flow_error_set(error, EINVAL,
2160 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2161 item, "Not supported last point for range");
2164 rule->b_mask = TRUE;
2165 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2167 /* Ether type should be masked. */
2168 if (eth_mask->type) {
2169 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2170 rte_flow_error_set(error, EINVAL,
2171 RTE_FLOW_ERROR_TYPE_ITEM,
2172 item, "Not supported by fdir filter");

	/* The src MAC address must be fully masked out. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When there is no VLAN item, treat the VLAN TCI as fully masked. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
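	/*
	 * Note: 0xEFFF is 1110 1111 1111 1111b, i.e. it covers the PCP bits
	 * (15:13) and the VLAN ID (11:0) of the TCI while ignoring bit 12,
	 * the CFI/DEI bit, which this filter does not match on.
	 */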

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}

	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* A "last" value (range) is not supported. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/**
		 * Check that the next not void item is not a second VLAN.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	} else {
		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN TCI mask is 0, the VLAN is a don't-care;
	 * nothing more to do here.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
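
/*
 * Example (illustrative only, not part of the driver): a tunnel-mode
 * flow director rule matching the inner MAC and VLAN of NVGRE traffic
 * would use a pattern along the lines of
 *
 *	... / NVGRE / ETH / VLAN / END
 *
 * with the NVGRE TNI either fully masked or ignored, the inner ETH item
 * masking only the destination MAC, and a QUEUE action selecting the
 * receive queue.
 */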

static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	/* Try the normal (non-tunnel) parser first, then the tunnel one. */
	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		return 0;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);

	return ret;
}

static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret = 0;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ixgbe_parse_fdir_filter(attr, pattern, actions,
				rule, error);

	/* The rule must match the flow director mode the port is set to. */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		ret = -ENOTSUP;

	return ret;
}
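
/*
 * For instance, when the port is configured for RTE_FDIR_MODE_PERFECT,
 * a rule parsed as a tunnel rule (RTE_FDIR_MODE_PERFECT_TUNNEL) is
 * rejected here with -ENOTSUP, and vice versa.
 */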

/* Release all the bookkeeping entries kept for the flow rules. */
static void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				 ntuple_filter_ptr,
				 entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				 ethertype_filter_ptr,
				 entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				 syn_filter_ptr,
				 entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				 l2_tn_filter_ptr,
				 entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				 fdir_rule_ptr,
				 entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				 ixgbe_flow_mem_ptr,
				 entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
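
/*
 * Note: this only frees the software bookkeeping for the rules; the
 * corresponding hardware entries are cleared separately (see the
 * ixgbe_clear_all_*_filter() calls in ixgbe_flow_flush() below).
 */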

/**
 * Create or destroy a flow rule.
 * In theory, one rule can match more than one kind of filter.
 * We will let it use the filter it hits first, so the order of the
 * parsers below matters: ntuple, then ethertype, then SYN, then flow
 * director, then L2 tunnel.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only one global mask is supported;
				 * all the masks must be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;
				return flow;
			}
			goto out;
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = cons_parse_l2_tn_filter(attr, pattern,
			actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	/* Parsing or programming failed; undo the bookkeeping. */
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
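
/*
 * Example (illustrative only): an application reaches this entry point
 * through the generic API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *
 * where attr, pattern and actions describe one of the filter types
 * parsed above, and f must later be released with rte_flow_destroy().
 */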

/**
 * Check whether a flow rule is supported by ixgbe.
 * This only checks the format: it does not guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}

/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
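
/*
 * Usage sketch (illustrative only): destroying a rule first removes it
 * from the hardware and the per-type filter list, then drops the
 * ixgbe_flow_list bookkeeping entry, mirroring ixgbe_flow_create():
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_destroy(port_id, f, &err);
 */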

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops ixgbe_flow_ops = {
	ixgbe_flow_validate,
	ixgbe_flow_create,
	ixgbe_flow_destroy,
	ixgbe_flow_flush,
};
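
/*
 * Note: the generic flow API reaches this table through the driver's
 * filter_ctrl callback; a query with RTE_ETH_FILTER_GENERIC and
 * RTE_ETH_FILTER_GET is expected to return a pointer to ixgbe_flow_ops.
 */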