4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_atomic.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
63 #include <rte_hash_crc.h>
65 #include <rte_flow_driver.h>
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
83 item = pattern + index;\
84 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
86 item = pattern + index; \
90 #define NEXT_ITEM_OF_ACTION(act, actions, index)\
92 act = actions + index; \
93 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
95 act = actions + index; \
100 * Please be aware there's an assumption for all the parsers.
101 * rte_flow_item is using big endian, rte_flow_attr and
102 * rte_flow_action are using CPU order.
103 * Because the pattern is used to describe the packets,
104 * normally the packets should use network order.
108 * Parse the rule to see if it is a n-tuple rule.
109 * And get the n-tuple filter info BTW.
111 * The first not void item can be ETH or IPV4.
112 * The second not void item must be IPV4 if the first one is ETH.
113 * The third not void item must be UDP or TCP.
114 * The next not void item must be END.
116 * The first not void action should be QUEUE.
117 * The next not void action should be END.
121 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
122 * dst_addr 192.167.3.50 0xFFFFFFFF
123 * next_proto_id 17 0xFF
124 * UDP/TCP src_port 80 0xFFFF
127 * other members in mask and spec should set to 0x00.
128 * item->last should be NULL.
131 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
132 const struct rte_flow_item pattern[],
133 const struct rte_flow_action actions[],
134 struct rte_eth_ntuple_filter *filter,
135 struct rte_flow_error *error)
137 const struct rte_flow_item *item;
138 const struct rte_flow_action *act;
139 const struct rte_flow_item_ipv4 *ipv4_spec;
140 const struct rte_flow_item_ipv4 *ipv4_mask;
141 const struct rte_flow_item_tcp *tcp_spec;
142 const struct rte_flow_item_tcp *tcp_mask;
143 const struct rte_flow_item_udp *udp_spec;
144 const struct rte_flow_item_udp *udp_mask;
148 rte_flow_error_set(error,
149 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
150 NULL, "NULL pattern.");
155 rte_flow_error_set(error, EINVAL,
156 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
157 NULL, "NULL action.");
161 rte_flow_error_set(error, EINVAL,
162 RTE_FLOW_ERROR_TYPE_ATTR,
163 NULL, "NULL attribute.");
170 /* the first not void item can be MAC or IPv4 */
171 NEXT_ITEM_OF_PATTERN(item, pattern, index);
173 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
174 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
175 rte_flow_error_set(error, EINVAL,
176 RTE_FLOW_ERROR_TYPE_ITEM,
177 item, "Not supported by ntuple filter");
181 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
182 /*Not supported last point for range*/
184 rte_flow_error_set(error,
186 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
187 item, "Not supported last point for range");
191 /* if the first item is MAC, the content should be NULL */
192 if (item->spec || item->mask) {
193 rte_flow_error_set(error, EINVAL,
194 RTE_FLOW_ERROR_TYPE_ITEM,
195 item, "Not supported by ntuple filter");
198 /* check if the next not void item is IPv4 */
200 NEXT_ITEM_OF_PATTERN(item, pattern, index);
201 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
202 rte_flow_error_set(error,
203 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
204 item, "Not supported by ntuple filter");
209 /* get the IPv4 info */
210 if (!item->spec || !item->mask) {
211 rte_flow_error_set(error, EINVAL,
212 RTE_FLOW_ERROR_TYPE_ITEM,
213 item, "Invalid ntuple mask");
216 /*Not supported last point for range*/
218 rte_flow_error_set(error, EINVAL,
219 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
220 item, "Not supported last point for range");
225 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
227 * Only support src & dst addresses, protocol,
228 * others should be masked.
230 if (ipv4_mask->hdr.version_ihl ||
231 ipv4_mask->hdr.type_of_service ||
232 ipv4_mask->hdr.total_length ||
233 ipv4_mask->hdr.packet_id ||
234 ipv4_mask->hdr.fragment_offset ||
235 ipv4_mask->hdr.time_to_live ||
236 ipv4_mask->hdr.hdr_checksum) {
237 rte_flow_error_set(error,
238 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
239 item, "Not supported by ntuple filter");
243 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
244 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
245 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
247 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
248 filter->dst_ip = ipv4_spec->hdr.dst_addr;
249 filter->src_ip = ipv4_spec->hdr.src_addr;
250 filter->proto = ipv4_spec->hdr.next_proto_id;
252 /* check if the next not void item is TCP or UDP */
254 NEXT_ITEM_OF_PATTERN(item, pattern, index);
255 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
256 item->type != RTE_FLOW_ITEM_TYPE_UDP) {
257 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
258 rte_flow_error_set(error, EINVAL,
259 RTE_FLOW_ERROR_TYPE_ITEM,
260 item, "Not supported by ntuple filter");
264 /* get the TCP/UDP info */
265 if (!item->spec || !item->mask) {
266 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
267 rte_flow_error_set(error, EINVAL,
268 RTE_FLOW_ERROR_TYPE_ITEM,
269 item, "Invalid ntuple mask");
273 /*Not supported last point for range*/
275 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
276 rte_flow_error_set(error, EINVAL,
277 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
278 item, "Not supported last point for range");
283 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
284 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
287 * Only support src & dst ports, tcp flags,
288 * others should be masked.
290 if (tcp_mask->hdr.sent_seq ||
291 tcp_mask->hdr.recv_ack ||
292 tcp_mask->hdr.data_off ||
293 tcp_mask->hdr.rx_win ||
294 tcp_mask->hdr.cksum ||
295 tcp_mask->hdr.tcp_urp) {
297 sizeof(struct rte_eth_ntuple_filter));
298 rte_flow_error_set(error, EINVAL,
299 RTE_FLOW_ERROR_TYPE_ITEM,
300 item, "Not supported by ntuple filter");
304 filter->dst_port_mask = tcp_mask->hdr.dst_port;
305 filter->src_port_mask = tcp_mask->hdr.src_port;
306 if (tcp_mask->hdr.tcp_flags == 0xFF) {
307 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
308 } else if (!tcp_mask->hdr.tcp_flags) {
309 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
311 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
312 rte_flow_error_set(error, EINVAL,
313 RTE_FLOW_ERROR_TYPE_ITEM,
314 item, "Not supported by ntuple filter");
318 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
319 filter->dst_port = tcp_spec->hdr.dst_port;
320 filter->src_port = tcp_spec->hdr.src_port;
321 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
323 udp_mask = (const struct rte_flow_item_udp *)item->mask;
326 * Only support src & dst ports,
327 * others should be masked.
329 if (udp_mask->hdr.dgram_len ||
330 udp_mask->hdr.dgram_cksum) {
332 sizeof(struct rte_eth_ntuple_filter));
333 rte_flow_error_set(error, EINVAL,
334 RTE_FLOW_ERROR_TYPE_ITEM,
335 item, "Not supported by ntuple filter");
339 filter->dst_port_mask = udp_mask->hdr.dst_port;
340 filter->src_port_mask = udp_mask->hdr.src_port;
342 udp_spec = (const struct rte_flow_item_udp *)item->spec;
343 filter->dst_port = udp_spec->hdr.dst_port;
344 filter->src_port = udp_spec->hdr.src_port;
347 /* check if the next not void item is END */
349 NEXT_ITEM_OF_PATTERN(item, pattern, index);
350 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
351 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
352 rte_flow_error_set(error, EINVAL,
353 RTE_FLOW_ERROR_TYPE_ITEM,
354 item, "Not supported by ntuple filter");
362 * n-tuple only supports forwarding,
363 * check if the first not void action is QUEUE.
365 NEXT_ITEM_OF_ACTION(act, actions, index);
366 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
367 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
368 rte_flow_error_set(error, EINVAL,
369 RTE_FLOW_ERROR_TYPE_ACTION,
370 item, "Not supported action.");
374 ((const struct rte_flow_action_queue *)act->conf)->index;
376 /* check if the next not void item is END */
378 NEXT_ITEM_OF_ACTION(act, actions, index);
379 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
380 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
381 rte_flow_error_set(error, EINVAL,
382 RTE_FLOW_ERROR_TYPE_ACTION,
383 act, "Not supported action.");
388 /* must be input direction */
389 if (!attr->ingress) {
390 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
391 rte_flow_error_set(error, EINVAL,
392 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
393 attr, "Only support ingress.");
399 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400 rte_flow_error_set(error, EINVAL,
401 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
402 attr, "Not support egress.");
406 if (attr->priority > 0xFFFF) {
407 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
408 rte_flow_error_set(error, EINVAL,
409 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
410 attr, "Error priority.");
413 filter->priority = (uint16_t)attr->priority;
414 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
415 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
416 filter->priority = 1;
421 /* a specific function for ixgbe because the flags is specific */
423 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
424 const struct rte_flow_attr *attr,
425 const struct rte_flow_item pattern[],
426 const struct rte_flow_action actions[],
427 struct rte_eth_ntuple_filter *filter,
428 struct rte_flow_error *error)
431 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
433 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
435 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
440 /* Ixgbe doesn't support tcp flags. */
441 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
442 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
443 rte_flow_error_set(error, EINVAL,
444 RTE_FLOW_ERROR_TYPE_ITEM,
445 NULL, "Not supported by ntuple filter");
449 /* Ixgbe doesn't support many priorities. */
450 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
451 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
452 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
453 rte_flow_error_set(error, EINVAL,
454 RTE_FLOW_ERROR_TYPE_ITEM,
455 NULL, "Priority not supported by ntuple filter");
459 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
460 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
461 filter->priority < IXGBE_5TUPLE_MIN_PRI)
464 /* fixed value for ixgbe */
465 filter->flags = RTE_5TUPLE_FLAGS;
470 * Parse the rule to see if it is a ethertype rule.
471 * And get the ethertype filter info BTW.
473 * The first not void item can be ETH.
474 * The next not void item must be END.
476 * The first not void action should be QUEUE.
477 * The next not void action should be END.
480 * ETH type 0x0807 0xFFFF
482 * other members in mask and spec should set to 0x00.
483 * item->last should be NULL.
486 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
487 const struct rte_flow_item *pattern,
488 const struct rte_flow_action *actions,
489 struct rte_eth_ethertype_filter *filter,
490 struct rte_flow_error *error)
492 const struct rte_flow_item *item;
493 const struct rte_flow_action *act;
494 const struct rte_flow_item_eth *eth_spec;
495 const struct rte_flow_item_eth *eth_mask;
496 const struct rte_flow_action_queue *act_q;
500 rte_flow_error_set(error, EINVAL,
501 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
502 NULL, "NULL pattern.");
507 rte_flow_error_set(error, EINVAL,
508 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
509 NULL, "NULL action.");
514 rte_flow_error_set(error, EINVAL,
515 RTE_FLOW_ERROR_TYPE_ATTR,
516 NULL, "NULL attribute.");
523 /* The first non-void item should be MAC. */
524 item = pattern + index;
525 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
527 item = pattern + index;
529 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
530 rte_flow_error_set(error, EINVAL,
531 RTE_FLOW_ERROR_TYPE_ITEM,
532 item, "Not supported by ethertype filter");
536 /*Not supported last point for range*/
538 rte_flow_error_set(error, EINVAL,
539 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
540 item, "Not supported last point for range");
544 /* Get the MAC info. */
545 if (!item->spec || !item->mask) {
546 rte_flow_error_set(error, EINVAL,
547 RTE_FLOW_ERROR_TYPE_ITEM,
548 item, "Not supported by ethertype filter");
552 eth_spec = (const struct rte_flow_item_eth *)item->spec;
553 eth_mask = (const struct rte_flow_item_eth *)item->mask;
555 /* Mask bits of source MAC address must be full of 0.
556 * Mask bits of destination MAC address must be full
559 if (!is_zero_ether_addr(ð_mask->src) ||
560 (!is_zero_ether_addr(ð_mask->dst) &&
561 !is_broadcast_ether_addr(ð_mask->dst))) {
562 rte_flow_error_set(error, EINVAL,
563 RTE_FLOW_ERROR_TYPE_ITEM,
564 item, "Invalid ether address mask");
568 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
569 rte_flow_error_set(error, EINVAL,
570 RTE_FLOW_ERROR_TYPE_ITEM,
571 item, "Invalid ethertype mask");
575 /* If mask bits of destination MAC address
576 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
578 if (is_broadcast_ether_addr(ð_mask->dst)) {
579 filter->mac_addr = eth_spec->dst;
580 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
582 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
584 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
586 /* Check if the next non-void item is END. */
588 item = pattern + index;
589 while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
591 item = pattern + index;
593 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
594 rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM,
596 item, "Not supported by ethertype filter.");
603 /* Check if the first non-void action is QUEUE or DROP. */
604 act = actions + index;
605 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
607 act = actions + index;
609 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
610 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
611 rte_flow_error_set(error, EINVAL,
612 RTE_FLOW_ERROR_TYPE_ACTION,
613 act, "Not supported action.");
617 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
618 act_q = (const struct rte_flow_action_queue *)act->conf;
619 filter->queue = act_q->index;
621 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
624 /* Check if the next non-void item is END */
626 act = actions + index;
627 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
629 act = actions + index;
631 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
632 rte_flow_error_set(error, EINVAL,
633 RTE_FLOW_ERROR_TYPE_ACTION,
634 act, "Not supported action.");
639 /* Must be input direction */
640 if (!attr->ingress) {
641 rte_flow_error_set(error, EINVAL,
642 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
643 attr, "Only support ingress.");
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
651 attr, "Not support egress.");
656 if (attr->priority) {
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
659 attr, "Not support priority.");
665 rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
667 attr, "Not support group.");
675 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
676 const struct rte_flow_attr *attr,
677 const struct rte_flow_item pattern[],
678 const struct rte_flow_action actions[],
679 struct rte_eth_ethertype_filter *filter,
680 struct rte_flow_error *error)
683 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
685 MAC_TYPE_FILTER_SUP(hw->mac.type);
687 ret = cons_parse_ethertype_filter(attr, pattern,
688 actions, filter, error);
693 /* Ixgbe doesn't support MAC address. */
694 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
695 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
696 rte_flow_error_set(error, EINVAL,
697 RTE_FLOW_ERROR_TYPE_ITEM,
698 NULL, "Not supported by ethertype filter");
702 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
703 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
704 rte_flow_error_set(error, EINVAL,
705 RTE_FLOW_ERROR_TYPE_ITEM,
706 NULL, "queue index much too big");
710 if (filter->ether_type == ETHER_TYPE_IPv4 ||
711 filter->ether_type == ETHER_TYPE_IPv6) {
712 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
713 rte_flow_error_set(error, EINVAL,
714 RTE_FLOW_ERROR_TYPE_ITEM,
715 NULL, "IPv4/IPv6 not supported by ethertype filter");
719 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
720 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721 rte_flow_error_set(error, EINVAL,
722 RTE_FLOW_ERROR_TYPE_ITEM,
723 NULL, "mac compare is unsupported");
727 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
728 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729 rte_flow_error_set(error, EINVAL,
730 RTE_FLOW_ERROR_TYPE_ITEM,
731 NULL, "drop option is unsupported");
739 * Parse the rule to see if it is a TCP SYN rule.
740 * And get the TCP SYN filter info BTW.
742 * The first not void item must be ETH.
743 * The second not void item must be IPV4 or IPV6.
744 * The third not void item must be TCP.
745 * The next not void item must be END.
747 * The first not void action should be QUEUE.
748 * The next not void action should be END.
752 * IPV4/IPV6 NULL NULL
753 * TCP tcp_flags 0x02 0xFF
755 * other members in mask and spec should set to 0x00.
756 * item->last should be NULL.
759 cons_parse_syn_filter(const struct rte_flow_attr *attr,
760 const struct rte_flow_item pattern[],
761 const struct rte_flow_action actions[],
762 struct rte_eth_syn_filter *filter,
763 struct rte_flow_error *error)
765 const struct rte_flow_item *item;
766 const struct rte_flow_action *act;
767 const struct rte_flow_item_tcp *tcp_spec;
768 const struct rte_flow_item_tcp *tcp_mask;
769 const struct rte_flow_action_queue *act_q;
773 rte_flow_error_set(error, EINVAL,
774 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
775 NULL, "NULL pattern.");
780 rte_flow_error_set(error, EINVAL,
781 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
782 NULL, "NULL action.");
787 rte_flow_error_set(error, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ATTR,
789 NULL, "NULL attribute.");
796 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
797 NEXT_ITEM_OF_PATTERN(item, pattern, index);
798 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
799 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
800 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
801 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
802 rte_flow_error_set(error, EINVAL,
803 RTE_FLOW_ERROR_TYPE_ITEM,
804 item, "Not supported by syn filter");
807 /*Not supported last point for range*/
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
811 item, "Not supported last point for range");
816 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
817 /* if the item is MAC, the content should be NULL */
818 if (item->spec || item->mask) {
819 rte_flow_error_set(error, EINVAL,
820 RTE_FLOW_ERROR_TYPE_ITEM,
821 item, "Invalid SYN address mask");
825 /* check if the next not void item is IPv4 or IPv6 */
827 NEXT_ITEM_OF_PATTERN(item, pattern, index);
828 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
829 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
830 rte_flow_error_set(error, EINVAL,
831 RTE_FLOW_ERROR_TYPE_ITEM,
832 item, "Not supported by syn filter");
838 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
839 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
840 /* if the item is IP, the content should be NULL */
841 if (item->spec || item->mask) {
842 rte_flow_error_set(error, EINVAL,
843 RTE_FLOW_ERROR_TYPE_ITEM,
844 item, "Invalid SYN mask");
848 /* check if the next not void item is TCP */
850 NEXT_ITEM_OF_PATTERN(item, pattern, index);
851 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
854 item, "Not supported by syn filter");
859 /* Get the TCP info. Only support SYN. */
860 if (!item->spec || !item->mask) {
861 rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ITEM,
863 item, "Invalid SYN mask");
866 /*Not supported last point for range*/
868 rte_flow_error_set(error, EINVAL,
869 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870 item, "Not supported last point for range");
874 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876 if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877 tcp_mask->hdr.src_port ||
878 tcp_mask->hdr.dst_port ||
879 tcp_mask->hdr.sent_seq ||
880 tcp_mask->hdr.recv_ack ||
881 tcp_mask->hdr.data_off ||
882 tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883 tcp_mask->hdr.rx_win ||
884 tcp_mask->hdr.cksum ||
885 tcp_mask->hdr.tcp_urp) {
886 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887 rte_flow_error_set(error, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ITEM,
889 item, "Not supported by syn filter");
893 /* check if the next not void item is END */
895 NEXT_ITEM_OF_PATTERN(item, pattern, index);
896 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
897 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
898 rte_flow_error_set(error, EINVAL,
899 RTE_FLOW_ERROR_TYPE_ITEM,
900 item, "Not supported by syn filter");
907 /* check if the first not void action is QUEUE. */
908 NEXT_ITEM_OF_ACTION(act, actions, index);
909 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
910 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
911 rte_flow_error_set(error, EINVAL,
912 RTE_FLOW_ERROR_TYPE_ACTION,
913 act, "Not supported action.");
917 act_q = (const struct rte_flow_action_queue *)act->conf;
918 filter->queue = act_q->index;
919 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
920 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
921 rte_flow_error_set(error, EINVAL,
922 RTE_FLOW_ERROR_TYPE_ACTION,
923 act, "Not supported action.");
927 /* check if the next not void item is END */
929 NEXT_ITEM_OF_ACTION(act, actions, index);
930 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
931 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
932 rte_flow_error_set(error, EINVAL,
933 RTE_FLOW_ERROR_TYPE_ACTION,
934 act, "Not supported action.");
939 /* must be input direction */
940 if (!attr->ingress) {
941 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
942 rte_flow_error_set(error, EINVAL,
943 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
944 attr, "Only support ingress.");
950 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
951 rte_flow_error_set(error, EINVAL,
952 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
953 attr, "Not support egress.");
957 /* Support 2 priorities, the lowest or highest. */
958 if (!attr->priority) {
960 } else if (attr->priority == (uint32_t)~0U) {
963 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
964 rte_flow_error_set(error, EINVAL,
965 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
966 attr, "Not support priority.");
974 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
975 const struct rte_flow_attr *attr,
976 const struct rte_flow_item pattern[],
977 const struct rte_flow_action actions[],
978 struct rte_eth_syn_filter *filter,
979 struct rte_flow_error *error)
982 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
984 MAC_TYPE_FILTER_SUP(hw->mac.type);
986 ret = cons_parse_syn_filter(attr, pattern,
987 actions, filter, error);
996 * Parse the rule to see if it is a L2 tunnel rule.
997 * And get the L2 tunnel filter info BTW.
998 * Only support E-tag now.
1000 * The first not void item can be E_TAG.
1001 * The next not void item must be END.
1003 * The first not void action should be QUEUE.
1004 * The next not void action should be END.
1008 e_cid_base 0x309 0xFFF
1010 * other members in mask and spec should set to 0x00.
1011 * item->last should be NULL.
1014 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1015 const struct rte_flow_item pattern[],
1016 const struct rte_flow_action actions[],
1017 struct rte_eth_l2_tunnel_conf *filter,
1018 struct rte_flow_error *error)
1020 const struct rte_flow_item *item;
1021 const struct rte_flow_item_e_tag *e_tag_spec;
1022 const struct rte_flow_item_e_tag *e_tag_mask;
1023 const struct rte_flow_action *act;
1024 const struct rte_flow_action_queue *act_q;
1028 rte_flow_error_set(error, EINVAL,
1029 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1030 NULL, "NULL pattern.");
1035 rte_flow_error_set(error, EINVAL,
1036 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1037 NULL, "NULL action.");
1042 rte_flow_error_set(error, EINVAL,
1043 RTE_FLOW_ERROR_TYPE_ATTR,
1044 NULL, "NULL attribute.");
1050 /* The first not void item should be e-tag. */
1051 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1052 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1053 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1054 rte_flow_error_set(error, EINVAL,
1055 RTE_FLOW_ERROR_TYPE_ITEM,
1056 item, "Not supported by L2 tunnel filter");
1060 if (!item->spec || !item->mask) {
1061 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1062 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1063 item, "Not supported by L2 tunnel filter");
1067 /*Not supported last point for range*/
1069 rte_flow_error_set(error, EINVAL,
1070 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1071 item, "Not supported last point for range");
1075 e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1076 e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1078 /* Only care about GRP and E cid base. */
1079 if (e_tag_mask->epcp_edei_in_ecid_b ||
1080 e_tag_mask->in_ecid_e ||
1081 e_tag_mask->ecid_e ||
1082 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1083 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1084 rte_flow_error_set(error, EINVAL,
1085 RTE_FLOW_ERROR_TYPE_ITEM,
1086 item, "Not supported by L2 tunnel filter");
1090 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1092 * grp and e_cid_base are bit fields and only use 14 bits.
1093 * e-tag id is taken as little endian by HW.
1095 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1097 /* check if the next not void item is END */
1099 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1100 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1101 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1102 rte_flow_error_set(error, EINVAL,
1103 RTE_FLOW_ERROR_TYPE_ITEM,
1104 item, "Not supported by L2 tunnel filter");
1109 /* must be input direction */
1110 if (!attr->ingress) {
1111 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1112 rte_flow_error_set(error, EINVAL,
1113 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114 attr, "Only support ingress.");
1120 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1121 rte_flow_error_set(error, EINVAL,
1122 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123 attr, "Not support egress.");
1128 if (attr->priority) {
1129 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1130 rte_flow_error_set(error, EINVAL,
1131 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1132 attr, "Not support priority.");
1139 /* check if the first not void action is QUEUE. */
1140 NEXT_ITEM_OF_ACTION(act, actions, index);
1141 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1142 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1143 rte_flow_error_set(error, EINVAL,
1144 RTE_FLOW_ERROR_TYPE_ACTION,
1145 act, "Not supported action.");
1149 act_q = (const struct rte_flow_action_queue *)act->conf;
1150 filter->pool = act_q->index;
1152 /* check if the next not void item is END */
1154 NEXT_ITEM_OF_ACTION(act, actions, index);
1155 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1156 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1157 rte_flow_error_set(error, EINVAL,
1158 RTE_FLOW_ERROR_TYPE_ACTION,
1159 act, "Not supported action.");
1167 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1168 const struct rte_flow_attr *attr,
1169 const struct rte_flow_item pattern[],
1170 const struct rte_flow_action actions[],
1171 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1172 struct rte_flow_error *error)
1175 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1177 ret = cons_parse_l2_tn_filter(attr, pattern,
1178 actions, l2_tn_filter, error);
1180 if (hw->mac.type != ixgbe_mac_X550 &&
1181 hw->mac.type != ixgbe_mac_X550EM_x &&
1182 hw->mac.type != ixgbe_mac_X550EM_a) {
1183 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1184 rte_flow_error_set(error, EINVAL,
1185 RTE_FLOW_ERROR_TYPE_ITEM,
1186 NULL, "Not supported by L2 tunnel filter");
/*
 * Validate the flow attributes and parse the action list of a flow
 * director (fdir) rule.
 *
 * Accepted attributes: ingress only, priority 0, no egress.
 * Accepted actions: QUEUE or DROP first, then an optional MARK, then END.
 * On success the queue index / drop flag and the optional soft_id are
 * written into @rule. On any failure @rule is zeroed and @error is filled
 * via rte_flow_error_set().
 *
 * NOTE(review): this chunk is a lossy extraction — the leading-number
 * prefixes are residue of the original file's line numbers, and several
 * guard lines (`return -rte_errno;`, closing braces, the egress `if`)
 * fall in the gaps between them.
 */
1193 /* Parse to get the attr and action info of flow director rule. */
1195 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1196 const struct rte_flow_action actions[],
1197 struct ixgbe_fdir_rule *rule,
1198 struct rte_flow_error *error)
1200 const struct rte_flow_action *act;
1201 const struct rte_flow_action_queue *act_q;
1202 const struct rte_flow_action_mark *mark;
1206 /* must be input direction */
1207 if (!attr->ingress) {
1208 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1209 rte_flow_error_set(error, EINVAL,
1210 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1211 attr, "Only support ingress.")
/* Egress rules are rejected (the `if (attr->egress)` guard sits in an
 * extraction gap just above this memset). */
1217 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1218 rte_flow_error_set(error, EINVAL,
1219 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1220 attr, "Not support egress.");
/* fdir has no notion of rule priority; only priority 0 is accepted. */
1225 if (attr->priority) {
1226 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1227 rte_flow_error_set(error, EINVAL,
1228 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1229 attr, "Not support priority.");
1236 /* check if the first not void action is QUEUE or DROP. */
1237 NEXT_ITEM_OF_ACTION(act, actions, index);
1238 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1239 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1240 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1241 rte_flow_error_set(error, EINVAL,
1242 RTE_FLOW_ERROR_TYPE_ACTION,
1243 act, "Not supported action.");
/* QUEUE -> record target rx queue; otherwise (DROP) set the hardware
 * drop flag in fdirflags. */
1247 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1248 act_q = (const struct rte_flow_action_queue *)act->conf;
1249 rule->queue = act_q->index;
1251 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1254 /* check if the next not void item is MARK */
1256 NEXT_ITEM_OF_ACTION(act, actions, index);
1257 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1258 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1259 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1260 rte_flow_error_set(error, EINVAL,
1261 RTE_FLOW_ERROR_TYPE_ACTION,
1262 act, "Not supported action.");
/* Optional MARK action: its 32-bit id becomes the rule's soft_id,
 * reported back in the mbuf on a match. */
1268 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1269 mark = (const struct rte_flow_action_mark *)act->conf;
1270 rule->soft_id = mark->id;
1272 NEXT_ITEM_OF_ACTION(act, actions, index);
1275 /* check if the next not void item is END */
1276 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1277 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1278 rte_flow_error_set(error, EINVAL,
1279 RTE_FLOW_ERROR_TYPE_ACTION,
1280 act, "Not supported action.");
1288 * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
1289 * And get the flow director filter info BTW.
1290 * UDP/TCP/SCTP PATTERN:
1291 * The first not void item can be ETH or IPV4.
1292 * The second not void item must be IPV4 if the first one is ETH.
1293 * The third not void item must be UDP or TCP or SCTP.
1294 * The next not void item must be END.
1296 * The first not void item must be ETH.
1297 * The second not void item must be MAC VLAN.
1298 * The next not void item must be END.
1300 * The first not void action should be QUEUE or DROP.
1301 * The second not void optional action should be MARK,
1302 * mark_id is a uint32_t number.
1303 * The next not void action should be END.
1304 * UDP/TCP/SCTP pattern example:
1307 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1308 * dst_addr 192.167.3.50 0xFFFFFFFF
1309 * UDP/TCP/SCTP src_port 80 0xFFFF
1310 * dst_port 80 0xFFFF
1312 * MAC VLAN pattern example:
1315 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1316 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1317 * MAC VLAN tci 0x2016 0xEFFF
1318 * tpid 0x8100 0xFFFF
1320 * Other members in mask and spec should set to 0x00.
1321 * Item->last should be NULL.
/*
 * Parse a flow rule into a non-tunnel (perfect / MAC-VLAN) flow director
 * filter, per the pattern grammar documented in the comment block above.
 *
 * Fills @rule (mode, flow_type, addresses/ports and their masks, b_spec /
 * b_mask flags) and delegates attr/action parsing to
 * ixgbe_parse_fdir_act_attr(). On any failure @rule is zeroed and @error
 * is set.
 *
 * NOTE(review): lossy extraction — leading numbers are residual original
 * line numbers; NULL-argument guards, `return -rte_errno;` lines and
 * closing braces sit in the numbering gaps.
 */
1324 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1325 const struct rte_flow_item pattern[],
1326 const struct rte_flow_action actions[],
1327 struct ixgbe_fdir_rule *rule,
1328 struct rte_flow_error *error)
1330 const struct rte_flow_item *item;
1331 const struct rte_flow_item_eth *eth_spec;
1332 const struct rte_flow_item_eth *eth_mask;
1333 const struct rte_flow_item_ipv4 *ipv4_spec;
1334 const struct rte_flow_item_ipv4 *ipv4_mask;
1335 const struct rte_flow_item_tcp *tcp_spec;
1336 const struct rte_flow_item_tcp *tcp_mask;
1337 const struct rte_flow_item_udp *udp_spec;
1338 const struct rte_flow_item_udp *udp_mask;
1339 const struct rte_flow_item_sctp *sctp_spec;
1340 const struct rte_flow_item_sctp *sctp_mask;
1341 const struct rte_flow_item_vlan *vlan_spec;
1342 const struct rte_flow_item_vlan *vlan_mask;
/* NULL pattern / actions / attr are rejected up front (the `if` guards
 * are in extraction gaps). */
1347 rte_flow_error_set(error, EINVAL,
1348 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1349 NULL, "NULL pattern.");
1354 rte_flow_error_set(error, EINVAL,
1355 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1356 NULL, "NULL action.");
1361 rte_flow_error_set(error, EINVAL,
1362 RTE_FLOW_ERROR_TYPE_ATTR,
1363 NULL, "NULL attribute.");
1368 * Some fields may not be provided. Set spec to 0 and mask to default
1369 * value. So, we need not do anything for the not provided fields later.
1371 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
/* Default: every field fully masked (match-all-bits)... */
1372 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
/* ...except VLAN TCI, which defaults to "don't care" until a VLAN item
 * (or a MAC-VLAN-mode ETH item) is seen. */
1373 rule->mask.vlan_tci_mask = 0;
1379 * The first not void item should be
1380 * MAC or IPv4 or TCP or UDP or SCTP.
1382 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1383 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1384 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1385 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1386 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1387 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1388 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1389 rte_flow_error_set(error, EINVAL,
1390 RTE_FLOW_ERROR_TYPE_ITEM,
1391 item, "Not supported by fdir filter");
/* Assume plain perfect mode; upgraded to PERFECT_MAC_VLAN below if the
 * ETH item carries a spec. */
1395 rule->mode = RTE_FDIR_MODE_PERFECT;
1397 /*Not supported last point for range*/
1399 rte_flow_error_set(error, EINVAL,
1400 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1401 item, "Not supported last point for range");
1405 /* Get the MAC info. */
1406 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1408 * Only support vlan and dst MAC address,
1409 * others should be masked.
1411 if (item->spec && !item->mask) {
1412 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1413 rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ITEM,
1415 item, "Not supported by fdir filter");
1420 rule->b_spec = TRUE;
1421 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1423 /* Get the dst MAC. */
1424 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1425 rule->ixgbe_fdir.formatted.inner_mac[j] =
1426 eth_spec->dst.addr_bytes[j];
1432 /* If ethernet has meaning, it means MAC VLAN mode. */
1433 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1435 rule->b_mask = TRUE;
1436 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1438 /* Ether type should be masked. */
1439 if (eth_mask->type) {
1440 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1441 rte_flow_error_set(error, EINVAL,
1442 RTE_FLOW_ERROR_TYPE_ITEM,
1443 item, "Not supported by fdir filter");
1448 * src MAC address must be masked,
1449 * and don't support dst MAC address mask.
1451 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1452 if (eth_mask->src.addr_bytes[j] ||
1453 eth_mask->dst.addr_bytes[j] != 0xFF) {
1455 sizeof(struct ixgbe_fdir_rule));
1456 rte_flow_error_set(error, EINVAL,
1457 RTE_FLOW_ERROR_TYPE_ITEM,
1458 item, "Not supported by fdir filter");
1463 /* When no VLAN, considered as full mask. */
/* 0xEFFF: full TCI mask with one bit cleared — presumably the DEI/CFI
 * bit (bit 12) is excluded from matching; TODO confirm against the
 * ixgbe datasheet. */
1464 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1466 /* If both spec and mask are NULL (not provided),
1467 * it means don't care about ETH.
1472 * Check if the next not void item is vlan or ipv4.
1473 * IPv6 is not supported.
1476 NEXT_ITEM_OF_PATTERN(item, pattern, index);
/* In MAC-VLAN mode a VLAN item is mandatory; otherwise IPv4 must
 * follow. */
1477 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1478 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1479 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1480 rte_flow_error_set(error, EINVAL,
1481 RTE_FLOW_ERROR_TYPE_ITEM,
1482 item, "Not supported by fdir filter");
1486 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1487 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1488 rte_flow_error_set(error, EINVAL,
1489 RTE_FLOW_ERROR_TYPE_ITEM,
1490 item, "Not supported by fdir filter");
1496 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
/* VLAN item must provide both spec and mask. */
1497 if (!(item->spec && item->mask)) {
1498 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1499 rte_flow_error_set(error, EINVAL,
1500 RTE_FLOW_ERROR_TYPE_ITEM,
1501 item, "Not supported by fdir filter");
1505 /*Not supported last point for range*/
1507 rte_flow_error_set(error, EINVAL,
1508 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1509 item, "Not supported last point for range");
1513 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1514 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
/* TPID must be the standard 802.1Q ethertype (0x8100). */
1516 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
1517 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1518 rte_flow_error_set(error, EINVAL,
1519 RTE_FLOW_ERROR_TYPE_ITEM,
1520 item, "Not supported by fdir filter");
1524 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
/* TPID mask must be all-ones: partial TPID matching is unsupported. */
1526 if (vlan_mask->tpid != (uint16_t)~0U) {
1527 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1528 rte_flow_error_set(error, EINVAL,
1529 RTE_FLOW_ERROR_TYPE_ITEM,
1530 item, "Not supported by fdir filter");
1533 rule->mask.vlan_tci_mask = vlan_mask->tci;
/* Clear bit 12 of the TCI mask (see 0xEFFF note above). */
1534 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1535 /* More than one tags are not supported. */
1538 * Check if the next not void item is not vlan.
1541 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1542 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1543 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1544 rte_flow_error_set(error, EINVAL,
1545 RTE_FLOW_ERROR_TYPE_ITEM,
1546 item, "Not supported by fdir filter");
1548 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1549 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1550 rte_flow_error_set(error, EINVAL,
1551 RTE_FLOW_ERROR_TYPE_ITEM,
1552 item, "Not supported by fdir filter");
1557 /* Get the IP info. */
1558 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1560 * Set the flow type even if there's no content
1561 * as we must have a flow type.
1563 rule->ixgbe_fdir.formatted.flow_type =
1564 IXGBE_ATR_FLOW_TYPE_IPV4;
1565 /*Not supported last point for range*/
1567 rte_flow_error_set(error, EINVAL,
1568 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1569 item, "Not supported last point for range");
1573 * Only care about src & dst addresses,
1574 * others should be masked.
1577 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1578 rte_flow_error_set(error, EINVAL,
1579 RTE_FLOW_ERROR_TYPE_ITEM,
1580 item, "Not supported by fdir filter");
1583 rule->b_mask = TRUE;
1585 (const struct rte_flow_item_ipv4 *)item->mask;
/* Every IPv4 header field except src/dst address must be unmasked
 * (zero in the mask) — hardware matches addresses only. */
1586 if (ipv4_mask->hdr.version_ihl ||
1587 ipv4_mask->hdr.type_of_service ||
1588 ipv4_mask->hdr.total_length ||
1589 ipv4_mask->hdr.packet_id ||
1590 ipv4_mask->hdr.fragment_offset ||
1591 ipv4_mask->hdr.time_to_live ||
1592 ipv4_mask->hdr.next_proto_id ||
1593 ipv4_mask->hdr.hdr_checksum) {
1594 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1595 rte_flow_error_set(error, EINVAL,
1596 RTE_FLOW_ERROR_TYPE_ITEM,
1597 item, "Not supported by fdir filter");
1600 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1601 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1604 rule->b_spec = TRUE;
1606 (const struct rte_flow_item_ipv4 *)item->spec;
1607 rule->ixgbe_fdir.formatted.dst_ip[0] =
1608 ipv4_spec->hdr.dst_addr;
1609 rule->ixgbe_fdir.formatted.src_ip[0] =
1610 ipv4_spec->hdr.src_addr;
1614 * Check if the next not void item is
1615 * TCP or UDP or SCTP or END.
1618 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1619 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1620 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1621 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1622 item->type != RTE_FLOW_ITEM_TYPE_END) {
1623 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1624 rte_flow_error_set(error, EINVAL,
1625 RTE_FLOW_ERROR_TYPE_ITEM,
1626 item, "Not supported by fdir filter");
1631 /* Get the TCP info. */
1632 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1634 * Set the flow type even if there's no content
1635 * as we must have a flow type.
1637 rule->ixgbe_fdir.formatted.flow_type =
1638 IXGBE_ATR_FLOW_TYPE_TCPV4;
1639 /*Not supported last point for range*/
1641 rte_flow_error_set(error, EINVAL,
1642 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1643 item, "Not supported last point for range");
1647 * Only care about src & dst ports,
1648 * others should be masked.
1651 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1652 rte_flow_error_set(error, EINVAL,
1653 RTE_FLOW_ERROR_TYPE_ITEM,
1654 item, "Not supported by fdir filter");
1657 rule->b_mask = TRUE;
1658 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
/* Only src/dst port may be masked; all other TCP header fields must
 * be zero in the mask. */
1659 if (tcp_mask->hdr.sent_seq ||
1660 tcp_mask->hdr.recv_ack ||
1661 tcp_mask->hdr.data_off ||
1662 tcp_mask->hdr.tcp_flags ||
1663 tcp_mask->hdr.rx_win ||
1664 tcp_mask->hdr.cksum ||
1665 tcp_mask->hdr.tcp_urp) {
1666 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1667 rte_flow_error_set(error, EINVAL,
1668 RTE_FLOW_ERROR_TYPE_ITEM,
1669 item, "Not supported by fdir filter");
1672 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1673 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1676 rule->b_spec = TRUE;
1677 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1678 rule->ixgbe_fdir.formatted.src_port =
1679 tcp_spec->hdr.src_port;
1680 rule->ixgbe_fdir.formatted.dst_port =
1681 tcp_spec->hdr.dst_port;
1685 /* Get the UDP info */
1686 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1688 * Set the flow type even if there's no content
1689 * as we must have a flow type.
1691 rule->ixgbe_fdir.formatted.flow_type =
1692 IXGBE_ATR_FLOW_TYPE_UDPV4;
1693 /*Not supported last point for range*/
1695 rte_flow_error_set(error, EINVAL,
1696 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1697 item, "Not supported last point for range");
1701 * Only care about src & dst ports,
1702 * others should be masked.
1705 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1706 rte_flow_error_set(error, EINVAL,
1707 RTE_FLOW_ERROR_TYPE_ITEM,
1708 item, "Not supported by fdir filter");
1711 rule->b_mask = TRUE;
1712 udp_mask = (const struct rte_flow_item_udp *)item->mask;
/* UDP length and checksum cannot be matched. */
1713 if (udp_mask->hdr.dgram_len ||
1714 udp_mask->hdr.dgram_cksum) {
1715 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1716 rte_flow_error_set(error, EINVAL,
1717 RTE_FLOW_ERROR_TYPE_ITEM,
1718 item, "Not supported by fdir filter");
1721 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1722 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1725 rule->b_spec = TRUE;
1726 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1727 rule->ixgbe_fdir.formatted.src_port =
1728 udp_spec->hdr.src_port;
1729 rule->ixgbe_fdir.formatted.dst_port =
1730 udp_spec->hdr.dst_port;
1734 /* Get the SCTP info */
1735 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1737 * Set the flow type even if there's no content
1738 * as we must have a flow type.
1740 rule->ixgbe_fdir.formatted.flow_type =
1741 IXGBE_ATR_FLOW_TYPE_SCTPV4;
1742 /*Not supported last point for range*/
1744 rte_flow_error_set(error, EINVAL,
1745 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1746 item, "Not supported last point for range");
1750 * Only care about src & dst ports,
1751 * others should be masked.
1754 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1755 rte_flow_error_set(error, EINVAL,
1756 RTE_FLOW_ERROR_TYPE_ITEM,
1757 item, "Not supported by fdir filter");
1760 rule->b_mask = TRUE;
1762 (const struct rte_flow_item_sctp *)item->mask;
/* SCTP verification tag and checksum cannot be matched. */
1763 if (sctp_mask->hdr.tag ||
1764 sctp_mask->hdr.cksum) {
1765 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1766 rte_flow_error_set(error, EINVAL,
1767 RTE_FLOW_ERROR_TYPE_ITEM,
1768 item, "Not supported by fdir filter");
1771 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1772 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1775 rule->b_spec = TRUE;
1777 (const struct rte_flow_item_sctp *)item->spec;
1778 rule->ixgbe_fdir.formatted.src_port =
1779 sctp_spec->hdr.src_port;
1780 rule->ixgbe_fdir.formatted.dst_port =
1781 sctp_spec->hdr.dst_port;
/* Whatever the last consumed item was, the pattern must now end. */
1785 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1786 /* check if the next not void item is END */
1788 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1789 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1790 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1791 rte_flow_error_set(error, EINVAL,
1792 RTE_FLOW_ERROR_TYPE_ITEM,
1793 item, "Not supported by fdir filter");
/* Pattern accepted — finish by validating attributes and actions. */
1798 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
1801 #define NVGRE_PROTOCOL 0x6558
1804 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
1805 * And get the flow director filter info BTW.
1807 * The first not void item must be ETH.
1808 * The second not void item must be IPV4/ IPV6.
1809 * The third not void item must be NVGRE.
1810 * The next not void item must be END.
1812 * The first not void item must be ETH.
1813 * The second not void item must be IPV4/ IPV6.
1814 * The third not void item must be NVGRE.
1815 * The next not void item must be END.
1817 * The first not void action should be QUEUE or DROP.
1818 * The second not void optional action should be MARK,
1819 * mark_id is a uint32_t number.
1820 * The next not void action should be END.
1821 * VxLAN pattern example:
1824 * IPV4/IPV6 NULL NULL
1826 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
1828 * NVGRE pattern example:
1831 * IPV4/IPV6 NULL NULL
1832 * NVGRE protocol 0x6558 0xFFFF
1833 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
1835 * other members in mask and spec should set to 0x00.
1836 * item->last should be NULL.
/*
 * Parse a flow rule into a tunnel-mode (VxLAN / NVGRE) flow director
 * filter, per the pattern grammar documented in the comment block above.
 *
 * Fills @rule (tunnel type, TNI/VNI and mask, inner MAC / VLAN and
 * masks) and delegates attr/action parsing to
 * ixgbe_parse_fdir_act_attr(). On any failure @rule is zeroed and
 * @error is set.
 *
 * NOTE(review): lossy extraction — leading numbers are residual original
 * line numbers; several guard `if`s, `return -rte_errno;` lines and
 * closing braces fall in the numbering gaps.
 */
1839 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1840 const struct rte_flow_item pattern[],
1841 const struct rte_flow_action actions[],
1842 struct ixgbe_fdir_rule *rule,
1843 struct rte_flow_error *error)
1845 const struct rte_flow_item *item;
1846 const struct rte_flow_item_vxlan *vxlan_spec;
1847 const struct rte_flow_item_vxlan *vxlan_mask;
1848 const struct rte_flow_item_nvgre *nvgre_spec;
1849 const struct rte_flow_item_nvgre *nvgre_mask;
1850 const struct rte_flow_item_eth *eth_spec;
1851 const struct rte_flow_item_eth *eth_mask;
1852 const struct rte_flow_item_vlan *vlan_spec;
1853 const struct rte_flow_item_vlan *vlan_mask;
/* NULL pattern / actions / attr are rejected up front (the `if` guards
 * are in extraction gaps). */
1857 rte_flow_error_set(error, EINVAL,
1858 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1859 NULL, "NULL pattern.");
1864 rte_flow_error_set(error, EINVAL,
1865 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1866 NULL, "NULL action.");
1871 rte_flow_error_set(error, EINVAL,
1872 RTE_FLOW_ERROR_TYPE_ATTR,
1873 NULL, "NULL attribute.");
1878 * Some fields may not be provided. Set spec to 0 and mask to default
1879 * value. So, we need not do anything for the not provided fields later.
1881 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1882 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
/* VLAN TCI defaults to "don't care" until a VLAN item is seen. */
1883 rule->mask.vlan_tci_mask = 0;
1889 * The first not void item should be
1890 * MAC or IPv4 or IPv6 or UDP or VxLAN.
1892 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1893 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1894 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1895 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1896 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1897 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1898 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1899 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900 rte_flow_error_set(error, EINVAL,
1901 RTE_FLOW_ERROR_TYPE_ITEM,
1902 item, "Not supported by fdir filter");
1906 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
/* Outer ETH item: protocol-stack placeholder only — no matching. */
1909 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1910 /* Only used to describe the protocol stack. */
1911 if (item->spec || item->mask) {
1912 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1913 rte_flow_error_set(error, EINVAL,
1914 RTE_FLOW_ERROR_TYPE_ITEM,
1915 item, "Not supported by fdir filter");
1918 /*Not supported last point for range*/
1920 rte_flow_error_set(error, EINVAL,
1921 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1922 item, "Not supported last point for range");
1926 /* Check if the next not void item is IPv4 or IPv6. */
1928 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1929 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1930 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1931 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1932 rte_flow_error_set(error, EINVAL,
1933 RTE_FLOW_ERROR_TYPE_ITEM,
1934 item, "Not supported by fdir filter");
/* Outer IPv4/IPv6 item: placeholder only, no spec/mask allowed. */
1940 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1941 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1942 /* Only used to describe the protocol stack. */
1943 if (item->spec || item->mask) {
1944 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1945 rte_flow_error_set(error, EINVAL,
1946 RTE_FLOW_ERROR_TYPE_ITEM,
1947 item, "Not supported by fdir filter");
1950 /*Not supported last point for range*/
1952 rte_flow_error_set(error, EINVAL,
1953 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1954 item, "Not supported last point for range");
1958 /* Check if the next not void item is UDP or NVGRE. */
1960 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1961 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1962 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1963 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1964 rte_flow_error_set(error, EINVAL,
1965 RTE_FLOW_ERROR_TYPE_ITEM,
1966 item, "Not supported by fdir filter");
/* Outer UDP item (VxLAN path): placeholder only. */
1972 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1973 /* Only used to describe the protocol stack. */
1974 if (item->spec || item->mask) {
1975 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1976 rte_flow_error_set(error, EINVAL,
1977 RTE_FLOW_ERROR_TYPE_ITEM,
1978 item, "Not supported by fdir filter");
1981 /*Not supported last point for range*/
1983 rte_flow_error_set(error, EINVAL,
1984 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1985 item, "Not supported last point for range");
1989 /* Check if the next not void item is VxLAN. */
1991 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1992 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
1993 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1994 rte_flow_error_set(error, EINVAL,
1995 RTE_FLOW_ERROR_TYPE_ITEM,
1996 item, "Not supported by fdir filter");
2001 /* Get the VxLAN info */
2002 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2003 rule->ixgbe_fdir.formatted.tunnel_type =
2004 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2006 /* Only care about VNI, others should be masked. */
2008 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009 rte_flow_error_set(error, EINVAL,
2010 RTE_FLOW_ERROR_TYPE_ITEM,
2011 item, "Not supported by fdir filter");
2014 /*Not supported last point for range*/
2016 rte_flow_error_set(error, EINVAL,
2017 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2018 item, "Not supported last point for range");
2021 rule->b_mask = TRUE;
2023 /* Tunnel type is always meaningful. */
2024 rule->mask.tunnel_type_mask = 1;
2027 (const struct rte_flow_item_vxlan *)item->mask;
/* VxLAN flags cannot be matched. */
2028 if (vxlan_mask->flags) {
2029 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030 rte_flow_error_set(error, EINVAL,
2031 RTE_FLOW_ERROR_TYPE_ITEM,
2032 item, "Not supported by fdir filter");
2035 /* VNI must be totally masked or not. */
2036 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2037 vxlan_mask->vni[2]) &&
2038 ((vxlan_mask->vni[0] != 0xFF) ||
2039 (vxlan_mask->vni[1] != 0xFF) ||
2040 (vxlan_mask->vni[2] != 0xFF))) {
2041 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2042 rte_flow_error_set(error, EINVAL,
2043 RTE_FLOW_ERROR_TYPE_ITEM,
2044 item, "Not supported by fdir filter");
2048 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2049 RTE_DIM(vxlan_mask->vni));
2052 rule->b_spec = TRUE;
2053 vxlan_spec = (const struct rte_flow_item_vxlan *)
/* Copy the 24-bit VNI into bytes 1..3 of tni_vni, then byte-swap —
 * presumably to produce the CPU-order layout the hardware register
 * expects; TODO confirm against ixgbe_fdir.c. */
2055 rte_memcpy(((uint8_t *)
2056 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2057 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2058 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2059 rule->ixgbe_fdir.formatted.tni_vni);
2063 /* Get the NVGRE info */
2064 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2065 rule->ixgbe_fdir.formatted.tunnel_type =
2066 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2069 * Only care about flags0, flags1, protocol and TNI,
2070 * others should be masked.
2073 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2074 rte_flow_error_set(error, EINVAL,
2075 RTE_FLOW_ERROR_TYPE_ITEM,
2076 item, "Not supported by fdir filter");
2079 /*Not supported last point for range*/
2081 rte_flow_error_set(error, EINVAL,
2082 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2083 item, "Not supported last point for range");
2086 rule->b_mask = TRUE;
2088 /* Tunnel type is always meaningful. */
2089 rule->mask.tunnel_type_mask = 1;
2092 (const struct rte_flow_item_nvgre *)item->mask;
/* NVGRE flow_id cannot be matched. */
2093 if (nvgre_mask->flow_id) {
2094 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2095 rte_flow_error_set(error, EINVAL,
2096 RTE_FLOW_ERROR_TYPE_ITEM,
2097 item, "Not supported by fdir filter");
/* The C/K/S/version field mask must be exactly 0x3000 (key-present +
 * sequence bits) and the protocol fully masked. */
2100 if (nvgre_mask->c_k_s_rsvd0_ver !=
2101 rte_cpu_to_be_16(0x3000) ||
2102 nvgre_mask->protocol != 0xFFFF) {
2103 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2104 rte_flow_error_set(error, EINVAL,
2105 RTE_FLOW_ERROR_TYPE_ITEM,
2106 item, "Not supported by fdir filter");
2109 /* TNI must be totally masked or not. */
/* NOTE(review): unlike the VxLAN VNI check above, this only triggers
 * when tni[0] is nonzero — a mask like {0x00, 0xFF, 0xFF} slips
 * through; looks inconsistent, confirm intent. */
2110 if (nvgre_mask->tni[0] &&
2111 ((nvgre_mask->tni[0] != 0xFF) ||
2112 (nvgre_mask->tni[1] != 0xFF) ||
2113 (nvgre_mask->tni[2] != 0xFF))) {
2114 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115 rte_flow_error_set(error, EINVAL,
2116 RTE_FLOW_ERROR_TYPE_ITEM,
2117 item, "Not supported by fdir filter");
2120 /* tni is a 24-bits bit field */
2121 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2122 RTE_DIM(nvgre_mask->tni));
/* Shift the 24-bit mask into the upper bytes of the 32-bit field. */
2123 rule->mask.tunnel_id_mask <<= 8;
2126 rule->b_spec = TRUE;
2128 (const struct rte_flow_item_nvgre *)item->spec;
/* Spec must carry key-present (0x2000) and the NVGRE ethertype
 * (0x6558, see NVGRE_PROTOCOL above). */
2129 if (nvgre_spec->c_k_s_rsvd0_ver !=
2130 rte_cpu_to_be_16(0x2000) ||
2131 nvgre_spec->protocol !=
2132 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2133 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2134 rte_flow_error_set(error, EINVAL,
2135 RTE_FLOW_ERROR_TYPE_ITEM,
2136 item, "Not supported by fdir filter");
2139 /* tni is a 24-bits bit field */
2140 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2141 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2142 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
/* After the tunnel header, the inner Ethernet header follows. */
2146 /* check if the next not void item is MAC */
2148 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2149 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2150 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2151 rte_flow_error_set(error, EINVAL,
2152 RTE_FLOW_ERROR_TYPE_ITEM,
2153 item, "Not supported by fdir filter");
2158 * Only support vlan and dst MAC address,
2159 * others should be masked.
2163 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2164 rte_flow_error_set(error, EINVAL,
2165 RTE_FLOW_ERROR_TYPE_ITEM,
2166 item, "Not supported by fdir filter");
2169 /*Not supported last point for range*/
2171 rte_flow_error_set(error, EINVAL,
2172 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2173 item, "Not supported last point for range");
2176 rule->b_mask = TRUE;
2177 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2179 /* Ether type should be masked. */
2180 if (eth_mask->type) {
2181 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2182 rte_flow_error_set(error, EINVAL,
2183 RTE_FLOW_ERROR_TYPE_ITEM,
2184 item, "Not supported by fdir filter");
2188 /* src MAC address should be masked. */
2189 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2190 if (eth_mask->src.addr_bytes[j]) {
2192 sizeof(struct ixgbe_fdir_rule));
2193 rte_flow_error_set(error, EINVAL,
2194 RTE_FLOW_ERROR_TYPE_ITEM,
2195 item, "Not supported by fdir filter");
/* Inner dst MAC mask is per-byte: each byte must be 0x00 or 0xFF;
 * 0xFF bytes set the corresponding bit in mac_addr_byte_mask. */
2199 rule->mask.mac_addr_byte_mask = 0;
2200 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2201 /* It's a per byte mask. */
2202 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2203 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2204 } else if (eth_mask->dst.addr_bytes[j]) {
2205 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2206 rte_flow_error_set(error, EINVAL,
2207 RTE_FLOW_ERROR_TYPE_ITEM,
2208 item, "Not supported by fdir filter");
2213 /* When no vlan, considered as full mask. */
/* 0xEFFF: full TCI mask with bit 12 (presumably DEI/CFI) excluded —
 * TODO confirm against the ixgbe datasheet. */
2214 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2217 rule->b_spec = TRUE;
2218 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2220 /* Get the dst MAC. */
2221 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2222 rule->ixgbe_fdir.formatted.inner_mac[j] =
2223 eth_spec->dst.addr_bytes[j];
2228 * Check if the next not void item is vlan or ipv4.
2229 * IPv6 is not supported.
2232 NEXT_ITEM_OF_PATTERN(item, pattern, index);
/* NOTE(review): both comparisons test VLAN, so per the comment above
 * the second one should plausibly be RTE_FLOW_ITEM_TYPE_IPV4 — as
 * written, anything other than VLAN (including IPV4) is rejected.
 * Not changed here because surrounding lines are missing from this
 * extraction; verify against upstream DPDK before fixing. */
2233 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2234 (item->type != RTE_FLOW_ITEM_TYPE_VLAN)) {
2235 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236 rte_flow_error_set(error, EINVAL,
2237 RTE_FLOW_ERROR_TYPE_ITEM,
2238 item, "Not supported by fdir filter");
2241 /*Not supported last point for range*/
2243 rte_flow_error_set(error, EINVAL,
2244 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245 item, "Not supported last point for range");
2249 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2250 if (!(item->spec && item->mask)) {
2251 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2252 rte_flow_error_set(error, EINVAL,
2253 RTE_FLOW_ERROR_TYPE_ITEM,
2254 item, "Not supported by fdir filter");
2258 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2259 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
/* TPID must be the standard 802.1Q ethertype (0x8100). */
2261 if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
2262 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2263 rte_flow_error_set(error, EINVAL,
2264 RTE_FLOW_ERROR_TYPE_ITEM,
2265 item, "Not supported by fdir filter");
2269 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
/* TPID mask must be all-ones: partial TPID matching unsupported. */
2271 if (vlan_mask->tpid != (uint16_t)~0U) {
2272 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273 rte_flow_error_set(error, EINVAL,
2274 RTE_FLOW_ERROR_TYPE_ITEM,
2275 item, "Not supported by fdir filter");
2278 rule->mask.vlan_tci_mask = vlan_mask->tci;
2279 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2280 /* More than one tags are not supported. */
2283 * Check if the next not void item is not vlan.
2286 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2287 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2288 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2289 rte_flow_error_set(error, EINVAL,
2290 RTE_FLOW_ERROR_TYPE_ITEM,
2291 item, "Not supported by fdir filter");
2293 } else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2294 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2295 rte_flow_error_set(error, EINVAL,
2296 RTE_FLOW_ERROR_TYPE_ITEM,
2297 item, "Not supported by fdir filter");
2300 /* check if the next not void item is END */
2302 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2303 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2304 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305 rte_flow_error_set(error, EINVAL,
2306 RTE_FLOW_ERROR_TYPE_ITEM,
2307 item, "Not supported by fdir filter");
2313 * If the tags is 0, it means don't care about the VLAN.
/* Pattern accepted — finish by validating attributes and actions. */
2317 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
/*
 * Top-level fdir rule parser: try the normal (non-tunnel) parser first,
 * then the tunnel parser, and finally check that the parsed rule's mode
 * matches the device's configured fdir mode.
 *
 * Only MACs with flow director support (82599EB, X540, X550 family) are
 * accepted. Control-flow glue (the early return for unsupported MACs,
 * the fall-through between parsers and the final returns) sits in the
 * extraction gaps between the numbered lines below.
 */
2321 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2322 const struct rte_flow_attr *attr,
2323 const struct rte_flow_item pattern[],
2324 const struct rte_flow_action actions[],
2325 struct ixgbe_fdir_rule *rule,
2326 struct rte_flow_error *error)
2329 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2330 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* fdir is only available on these MAC generations. */
2332 if (hw->mac.type != ixgbe_mac_82599EB &&
2333 hw->mac.type != ixgbe_mac_X540 &&
2334 hw->mac.type != ixgbe_mac_X550 &&
2335 hw->mac.type != ixgbe_mac_X550EM_x &&
2336 hw->mac.type != ixgbe_mac_X550EM_a)
2339 ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2340 actions, rule, error);
/* Normal parse failed — retry as a tunnel (VxLAN/NVGRE) rule. */
2345 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2346 actions, rule, error);
/* The rule's mode must agree with the configured device fdir mode. */
2349 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2350 fdir_mode != rule->mode)
/*
 * Drain and free every element of all six module-level filter lists
 * (ntuple, ethertype, SYN, L2 tunnel, fdir, and the flow-memory list).
 * For the flow-memory list the tracked rte_flow object is freed as
 * well. Pop-from-head until each TAILQ is empty.
 *
 * NOTE(review): extraction gaps — the second TAILQ_REMOVE arguments /
 * `entries` field names and the closing braces fall between the
 * numbered lines.
 */
2356 ixgbe_filterlist_flush(void)
2358 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2359 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2360 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2361 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2362 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2363 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2365 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2366 TAILQ_REMOVE(&filter_ntuple_list,
2369 rte_free(ntuple_filter_ptr);
2372 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2373 TAILQ_REMOVE(&filter_ethertype_list,
2374 ethertype_filter_ptr,
2376 rte_free(ethertype_filter_ptr);
2379 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2380 TAILQ_REMOVE(&filter_syn_list,
2383 rte_free(syn_filter_ptr);
2386 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2387 TAILQ_REMOVE(&filter_l2_tunnel_list,
2390 rte_free(l2_tn_filter_ptr);
2393 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2394 TAILQ_REMOVE(&filter_fdir_list,
2397 rte_free(fdir_rule_ptr);
2400 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2401 TAILQ_REMOVE(&ixgbe_flow_list,
/* Free the tracked rte_flow first, then its tracking node. */
2404 rte_free(ixgbe_flow_mem_ptr->flow);
2405 rte_free(ixgbe_flow_mem_ptr);
2410 * Create or destroy a flow rule.
2411 * Theoretically one rule can match more than one filter.
2412 * We will let it use the filter which it hits first.
2413 * So, the sequence matters.
2415 static struct rte_flow *
2416 ixgbe_flow_create(struct rte_eth_dev *dev,
2417 const struct rte_flow_attr *attr,
2418 const struct rte_flow_item pattern[],
2419 const struct rte_flow_action actions[],
2420 struct rte_flow_error *error)
2423 struct rte_eth_ntuple_filter ntuple_filter;
2424 struct rte_eth_ethertype_filter ethertype_filter;
2425 struct rte_eth_syn_filter syn_filter;
2426 struct ixgbe_fdir_rule fdir_rule;
2427 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2428 struct ixgbe_hw_fdir_info *fdir_info =
2429 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2430 struct rte_flow *flow = NULL;
2431 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2432 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2433 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2434 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2435 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2436 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2438 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2440 PMD_DRV_LOG(ERR, "failed to allocate memory");
2441 return (struct rte_flow *)flow;
2443 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2444 sizeof(struct ixgbe_flow_mem), 0);
2445 if (!ixgbe_flow_mem_ptr) {
2446 PMD_DRV_LOG(ERR, "failed to allocate memory");
2450 ixgbe_flow_mem_ptr->flow = flow;
2451 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2452 ixgbe_flow_mem_ptr, entries);
2454 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2455 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2456 actions, &ntuple_filter, error);
2458 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2460 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2461 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2462 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2464 sizeof(struct rte_eth_ntuple_filter));
2465 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2466 ntuple_filter_ptr, entries);
2467 flow->rule = ntuple_filter_ptr;
2468 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2474 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2475 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2476 actions, ðertype_filter, error);
2478 ret = ixgbe_add_del_ethertype_filter(dev,
2479 ðertype_filter, TRUE);
2481 ethertype_filter_ptr = rte_zmalloc(
2482 "ixgbe_ethertype_filter",
2483 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2484 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2486 sizeof(struct rte_eth_ethertype_filter));
2487 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2488 ethertype_filter_ptr, entries);
2489 flow->rule = ethertype_filter_ptr;
2490 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2496 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2497 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2498 actions, &syn_filter, error);
2500 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2502 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2503 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2504 (void)rte_memcpy(&syn_filter_ptr->filter_info,
2506 sizeof(struct rte_eth_syn_filter));
2507 TAILQ_INSERT_TAIL(&filter_syn_list,
2510 flow->rule = syn_filter_ptr;
2511 flow->filter_type = RTE_ETH_FILTER_SYN;
2517 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2518 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2519 actions, &fdir_rule, error);
2521 /* A mask cannot be deleted. */
2522 if (fdir_rule.b_mask) {
2523 if (!fdir_info->mask_added) {
2524 /* It's the first time the mask is set. */
2525 rte_memcpy(&fdir_info->mask,
2527 sizeof(struct ixgbe_hw_fdir_mask));
2528 ret = ixgbe_fdir_set_input_mask(dev);
2532 fdir_info->mask_added = TRUE;
2535 * Only support one global mask,
2536 * all the masks should be the same.
2538 ret = memcmp(&fdir_info->mask,
2540 sizeof(struct ixgbe_hw_fdir_mask));
2546 if (fdir_rule.b_spec) {
2547 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2550 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2551 sizeof(struct ixgbe_fdir_rule_ele), 0);
2552 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2554 sizeof(struct ixgbe_fdir_rule));
2555 TAILQ_INSERT_TAIL(&filter_fdir_list,
2556 fdir_rule_ptr, entries);
2557 flow->rule = fdir_rule_ptr;
2558 flow->filter_type = RTE_ETH_FILTER_FDIR;
2570 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2571 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2572 actions, &l2_tn_filter, error);
2574 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2576 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2577 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2578 (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2580 sizeof(struct rte_eth_l2_tunnel_conf));
2581 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2582 l2_tn_filter_ptr, entries);
2583 flow->rule = l2_tn_filter_ptr;
2584 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2590 TAILQ_REMOVE(&ixgbe_flow_list,
2591 ixgbe_flow_mem_ptr, entries);
2592 rte_free(ixgbe_flow_mem_ptr);
2598 * Check if the flow rule is supported by ixgbe.
2599 * It only checks the format; it does not guarantee the rule can be programmed
2600 * into the HW, because there may not be enough room for the rule.
/*
 * NOTE(review): the "static int" return-type line, braces, "int ret;" and the
 * "if (!ret) return 0;" guards between parser calls are missing from this
 * extract; "ð" below is mojibake for "&eth".  The parsers are tried in the
 * same priority order as ixgbe_flow_create().
 *
 * NOTE(review): dev is annotated __rte_unused yet is passed to every parser
 * call below -- the annotation looks stale; confirm against the full source.
 */
2603 ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
2604 const struct rte_flow_attr *attr,
2605 const struct rte_flow_item pattern[],
2606 const struct rte_flow_action actions[],
2607 struct rte_flow_error *error)
/* Scratch structs -- filled by the parsers, then discarded (parse-only). */
2609 struct rte_eth_ntuple_filter ntuple_filter;
2610 struct rte_eth_ethertype_filter ethertype_filter;
2611 struct rte_eth_syn_filter syn_filter;
2612 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2613 struct ixgbe_fdir_rule fdir_rule;
/* Try each parser in turn; nothing is written to HW. */
2616 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2617 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2618 actions, &ntuple_filter, error);
2622 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2623 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2624 actions, ðertype_filter, error);
2628 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2629 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2630 actions, &syn_filter, error);
2634 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2635 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2636 actions, &fdir_rule, error);
2640 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2641 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2642 actions, &l2_tn_filter, error);
2647 /* Destroy a flow rule on ixgbe. */
/*
 * NOTE(review): the return-type line, braces, "int ret = 0;", the "if (!ret)"
 * guards before each TAILQ_REMOVE, the "break;" statements and the final
 * "return ret;" are missing from this extract; "ð" below is mojibake for
 * "&eth".
 *
 * Dispatches on the flow's recorded filter_type, removes the filter from HW,
 * then unlinks and frees the per-type list node, and finally drops the
 * ixgbe_flow_list tracking node queued by ixgbe_flow_create().
 */
2649 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2650 struct rte_flow *flow,
2651 struct rte_flow_error *error)
2654 struct rte_flow *pmd_flow = flow;
2655 enum rte_filter_type filter_type = pmd_flow->filter_type;
/* Scratch copies handed to the HW delete helpers. */
2656 struct rte_eth_ntuple_filter ntuple_filter;
2657 struct rte_eth_ethertype_filter ethertype_filter;
2658 struct rte_eth_syn_filter syn_filter;
2659 struct ixgbe_fdir_rule fdir_rule;
2660 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2661 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2662 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2663 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2664 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2665 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2666 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2668 switch (filter_type) {
2669 case RTE_ETH_FILTER_NTUPLE:
/* Copy the stored filter out, delete it from HW, then free the list node. */
2670 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2672 (void)rte_memcpy(&ntuple_filter,
2673 &ntuple_filter_ptr->filter_info,
2674 sizeof(struct rte_eth_ntuple_filter));
2675 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2677 TAILQ_REMOVE(&filter_ntuple_list,
2678 ntuple_filter_ptr, entries);
2679 rte_free(ntuple_filter_ptr);
2682 case RTE_ETH_FILTER_ETHERTYPE:
2683 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2685 (void)rte_memcpy(ðertype_filter,
2686 ðertype_filter_ptr->filter_info,
2687 sizeof(struct rte_eth_ethertype_filter));
2688 ret = ixgbe_add_del_ethertype_filter(dev,
2689 ðertype_filter, FALSE);
2691 TAILQ_REMOVE(&filter_ethertype_list,
2692 ethertype_filter_ptr, entries);
2693 rte_free(ethertype_filter_ptr);
2696 case RTE_ETH_FILTER_SYN:
2697 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2699 (void)rte_memcpy(&syn_filter,
2700 &syn_filter_ptr->filter_info,
2701 sizeof(struct rte_eth_syn_filter));
2702 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2704 TAILQ_REMOVE(&filter_syn_list,
2705 syn_filter_ptr, entries);
2706 rte_free(syn_filter_ptr);
2709 case RTE_ETH_FILTER_FDIR:
2710 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2711 (void)rte_memcpy(&fdir_rule,
2712 &fdir_rule_ptr->filter_info,
2713 sizeof(struct ixgbe_fdir_rule));
/* TRUE, FALSE presumably mean (del=TRUE, update=FALSE) -- confirm prototype. */
2714 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2716 TAILQ_REMOVE(&filter_fdir_list,
2717 fdir_rule_ptr, entries);
2718 rte_free(fdir_rule_ptr);
2721 case RTE_ETH_FILTER_L2_TUNNEL:
2722 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2724 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2725 sizeof(struct rte_eth_l2_tunnel_conf));
2726 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2728 TAILQ_REMOVE(&filter_l2_tunnel_list,
2729 l2_tn_filter_ptr, entries);
2730 rte_free(l2_tn_filter_ptr);
/* default case: unknown filter_type is only logged. */
2734 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
/* HW removal failed -- report through the rte_flow error object. */
2741 rte_flow_error_set(error, EINVAL,
2742 RTE_FLOW_ERROR_TYPE_HANDLE,
2743 NULL, "Failed to destroy flow");
/* Success: drop the tracking node that references this flow, then free it. */
2747 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2748 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2749 TAILQ_REMOVE(&ixgbe_flow_list,
2750 ixgbe_flow_mem_ptr, entries);
2751 rte_free(ixgbe_flow_mem_ptr);
2759 /* Destroy all flow rules associated with a port on ixgbe. */
/*
 * NOTE(review): the return-type line, braces, "int ret = 0;", the
 * "if (ret < 0)" guards and the return statements are missing from this
 * extract.
 *
 * Clears every HW filter type, then empties the driver's bookkeeping lists.
 * On a HW-clear failure it reports via rte_flow_error_set() (and presumably
 * returns before flushing the lists -- confirm against the full source).
 */
2761 ixgbe_flow_flush(struct rte_eth_dev *dev,
2762 struct rte_flow_error *error)
/* These three clears return no status here and are treated as infallible. */
2766 ixgbe_clear_all_ntuple_filter(dev);
2767 ixgbe_clear_all_ethertype_filter(dev);
2768 ixgbe_clear_syn_filter(dev);
2770 ret = ixgbe_clear_all_fdir_filter(dev);
2772 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2773 NULL, "Failed to flush rule");
2777 ret = ixgbe_clear_all_l2_tn_filter(dev);
2779 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2780 NULL, "Failed to flush rule");
/* Drop all per-type filter list nodes and flow tracking nodes. */
2784 ixgbe_filterlist_flush();
2789 const struct rte_flow_ops ixgbe_flow_ops = {
2790 ixgbe_flow_validate,