/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7

#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)

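/*
 * Usage sketch for the two macros above (illustration only, not driver
 * code): given a pattern[] array, both advance "index" past any VOID
 * entries and leave the cursor on the first meaningful element.
 *
 *	uint32_t index = 0;
 *	const struct rte_flow_item *item;
 *
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *	// item == &pattern[index], the first non-VOID pattern entry
 */
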
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
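/*
 * Illustrative sketch (not driver code): an application could request the
 * rule documented above through the generic flow API roughly as follows.
 * port_id, the queue number and the IPv4() helper from rte_ip.h are
 * assumptions used only for this example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = 17,	// UDP
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF),
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */
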
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;

	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
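/*
 * Illustrative sketch (not driver code): pattern for the ethertype rule
 * documented above; attributes and actions would be built and submitted
 * the same way as in the n-tuple sketch earlier, so only the distinctive
 * item is shown here.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,	// the ether type must be fully masked
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
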
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
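/*
 * Illustrative sketch (not driver code): the distinctive TCP item for the
 * SYN rule documented above; TCP_SYN_FLAG (from rte_tcp.h) is 0x02. The
 * surrounding ETH/IPV4 items carry no spec or mask.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
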
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
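/*
 * Illustrative sketch (not driver code): the E-tag item for the rule
 * documented above. GRP and the E-CID base share the big-endian
 * rsvd_grp_ecid_b field: GRP sits in bits 13:12 and the E-CID base in
 * bits 11:0, hence the shift below (an assumption of this sketch, based
 * on the 0x3FFF mask the parser enforces).
 *
 *	struct rte_flow_item_e_tag etag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag etag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &etag_spec, .mask = &etag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
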
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}

/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void action is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void action is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
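/*
 * Illustrative sketch (not driver code): the action side of a flow
 * director rule as documented above; the pattern side looks like the
 * n-tuple sketch earlier (ETH/IPV4/UDP/END). MARK is optional and carries
 * a 32-bit id reported back with matching packets; the id value here is a
 * made-up example.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */
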
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
						sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/**
		 * Check if the next not void item is not vlan.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}

#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
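/*
 * Illustrative sketch (not driver code): the distinctive items of a VxLAN
 * flow director rule as documented above. The outer ETH/IPV4/UDP items
 * carry no spec/mask; inner_eth_spec/inner_eth_mask (inner dst MAC, fully
 * masked) and vlan_spec/vlan_mask (tci) are assumed to be built as in the
 * earlier sketches.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },	// VNI fully masked or not at all
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */
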
1851 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1852 const struct rte_flow_item pattern[],
1853 const struct rte_flow_action actions[],
1854 struct ixgbe_fdir_rule *rule,
1855 struct rte_flow_error *error)
1857 const struct rte_flow_item *item;
1858 const struct rte_flow_item_vxlan *vxlan_spec;
1859 const struct rte_flow_item_vxlan *vxlan_mask;
1860 const struct rte_flow_item_nvgre *nvgre_spec;
1861 const struct rte_flow_item_nvgre *nvgre_mask;
1862 const struct rte_flow_item_eth *eth_spec;
1863 const struct rte_flow_item_eth *eth_mask;
1864 const struct rte_flow_item_vlan *vlan_spec;
1865 const struct rte_flow_item_vlan *vlan_mask;
1869 rte_flow_error_set(error, EINVAL,
1870 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1871 NULL, "NULL pattern.");
1876 rte_flow_error_set(error, EINVAL,
1877 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1878 NULL, "NULL action.");
1883 rte_flow_error_set(error, EINVAL,
1884 RTE_FLOW_ERROR_TYPE_ATTR,
1885 NULL, "NULL attribute.");
1890 * Some fields may not be provided. Set spec to 0 and mask to default
1891 * value. So, we need not do anything for the not provided fields later.
1893 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1894 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1895 rule->mask.vlan_tci_mask = 0;
1901 * The first not void item should be
1902 * MAC or IPv4 or IPv6 or UDP or VxLAN.
1904 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1905 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1906 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1907 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1908 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1909 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1910 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1911 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1912 rte_flow_error_set(error, EINVAL,
1913 RTE_FLOW_ERROR_TYPE_ITEM,
1914 item, "Not supported by fdir filter");
1918 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1921 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1922 /* Only used to describe the protocol stack. */
1923 if (item->spec || item->mask) {
1924 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925 rte_flow_error_set(error, EINVAL,
1926 RTE_FLOW_ERROR_TYPE_ITEM,
1927 item, "Not supported by fdir filter");
1930 /*Not supported last point for range*/
1932 rte_flow_error_set(error, EINVAL,
1933 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1934 item, "Not supported last point for range");
1938 /* Check if the next not void item is IPv4 or IPv6. */
1940 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1941 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1942 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1943 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1944 rte_flow_error_set(error, EINVAL,
1945 RTE_FLOW_ERROR_TYPE_ITEM,
1946 item, "Not supported by fdir filter");
1952 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1953 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1954 /* Only used to describe the protocol stack. */
1955 if (item->spec || item->mask) {
1956 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1957 rte_flow_error_set(error, EINVAL,
1958 RTE_FLOW_ERROR_TYPE_ITEM,
1959 item, "Not supported by fdir filter");
1962 /*Not supported last point for range*/
1964 rte_flow_error_set(error, EINVAL,
1965 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1966 item, "Not supported last point for range");
1970 /* Check if the next not void item is UDP or NVGRE. */
1972 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1973 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1974 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1975 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1976 rte_flow_error_set(error, EINVAL,
1977 RTE_FLOW_ERROR_TYPE_ITEM,
1978 item, "Not supported by fdir filter");
1984 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1985 /* Only used to describe the protocol stack. */
1986 if (item->spec || item->mask) {
1987 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1988 rte_flow_error_set(error, EINVAL,
1989 RTE_FLOW_ERROR_TYPE_ITEM,
1990 item, "Not supported by fdir filter");
1993 /*Not supported last point for range*/
1995 rte_flow_error_set(error, EINVAL,
1996 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1997 item, "Not supported last point for range");
2001 /* Check if the next not void item is VxLAN. */
2003 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2004 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2005 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2006 rte_flow_error_set(error, EINVAL,
2007 RTE_FLOW_ERROR_TYPE_ITEM,
2008 item, "Not supported by fdir filter");
2013 /* Get the VxLAN info */
2014 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2015 rule->ixgbe_fdir.formatted.tunnel_type =
2016 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2018 /* Only care about VNI, others should be masked. */
2020 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2021 rte_flow_error_set(error, EINVAL,
2022 RTE_FLOW_ERROR_TYPE_ITEM,
2023 item, "Not supported by fdir filter");
2026 /*Not supported last point for range*/
2028 rte_flow_error_set(error, EINVAL,
2029 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2030 item, "Not supported last point for range");
2033 rule->b_mask = TRUE;
2035 /* Tunnel type is always meaningful. */
2036 rule->mask.tunnel_type_mask = 1;
2039 (const struct rte_flow_item_vxlan *)item->mask;
2040 if (vxlan_mask->flags) {
2041 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2042 rte_flow_error_set(error, EINVAL,
2043 RTE_FLOW_ERROR_TYPE_ITEM,
2044 item, "Not supported by fdir filter");
2047 /* VNI must be totally masked or not. */
2048 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2049 vxlan_mask->vni[2]) &&
2050 ((vxlan_mask->vni[0] != 0xFF) ||
2051 (vxlan_mask->vni[1] != 0xFF) ||
2052 (vxlan_mask->vni[2] != 0xFF))) {
2053 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2054 rte_flow_error_set(error, EINVAL,
2055 RTE_FLOW_ERROR_TYPE_ITEM,
2056 item, "Not supported by fdir filter");
2060 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2061 RTE_DIM(vxlan_mask->vni));
2064 rule->b_spec = TRUE;
2065 vxlan_spec = (const struct rte_flow_item_vxlan *)
2067 rte_memcpy(((uint8_t *)
2068 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2069 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2070 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2071 rule->ixgbe_fdir.formatted.tni_vni);
2075 /* Get the NVGRE info */
2076 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2077 rule->ixgbe_fdir.formatted.tunnel_type =
2078 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2081 * Only care about flags0, flags1, protocol and TNI,
2082 * others should be masked.
2085 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2086 rte_flow_error_set(error, EINVAL,
2087 RTE_FLOW_ERROR_TYPE_ITEM,
2088 item, "Not supported by fdir filter");
2091 /*Not supported last point for range*/
2093 rte_flow_error_set(error, EINVAL,
2094 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095 item, "Not supported last point for range");
2098 rule->b_mask = TRUE;
2100 /* Tunnel type is always meaningful. */
2101 rule->mask.tunnel_type_mask = 1;
2104 (const struct rte_flow_item_nvgre *)item->mask;
2105 if (nvgre_mask->flow_id) {
2106 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107 rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_ITEM,
2109 item, "Not supported by fdir filter");
2112 if (nvgre_mask->c_k_s_rsvd0_ver !=
2113 rte_cpu_to_be_16(0x3000) ||
2114 nvgre_mask->protocol != 0xFFFF) {
2115 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116 rte_flow_error_set(error, EINVAL,
2117 RTE_FLOW_ERROR_TYPE_ITEM,
2118 item, "Not supported by fdir filter");
2121 /* TNI must be totally masked or not. */
2122 if (nvgre_mask->tni[0] &&
2123 ((nvgre_mask->tni[0] != 0xFF) ||
2124 (nvgre_mask->tni[1] != 0xFF) ||
2125 (nvgre_mask->tni[2] != 0xFF))) {
2126 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2127 rte_flow_error_set(error, EINVAL,
2128 RTE_FLOW_ERROR_TYPE_ITEM,
2129 item, "Not supported by fdir filter");
2132 /* tni is a 24-bits bit field */
2133 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2134 RTE_DIM(nvgre_mask->tni));
2135 rule->mask.tunnel_id_mask <<= 8;
2138 rule->b_spec = TRUE;
2140 (const struct rte_flow_item_nvgre *)item->spec;
2141 if (nvgre_spec->c_k_s_rsvd0_ver !=
2142 rte_cpu_to_be_16(0x2000) ||
2143 nvgre_spec->protocol !=
2144 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2145 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2146 rte_flow_error_set(error, EINVAL,
2147 RTE_FLOW_ERROR_TYPE_ITEM,
2148 item, "Not supported by fdir filter");
2151 /* tni is a 24-bits bit field */
2152 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2153 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2154 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
	/* Check if the next not void item is MAC. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support VLAN and dst MAC address;
	 * others should be masked.
	 */

	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
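	/*
	 * Example of the per-byte MAC mask built above (illustrative
	 * values, not from the original code): a dst mask of
	 * ff:ff:ff:ff:ff:ff sets all six bits, giving
	 * mac_addr_byte_mask = 0x3F, while ff:ff:ff:ff:ff:00 gives 0x1F;
	 * any mask byte other than 0xFF or 0x00 is rejected.
	 */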
	/* When no VLAN is given, regard it as a full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
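	/*
	 * 0xEFFF keeps the 3 PCP bits and the 12-bit VLAN ID of the TCI
	 * field but clears bit 12 (CFI/DEI), so the drop-eligible flag is
	 * never matched on.
	 */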
	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);

		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the tag mask is 0, the VLAN is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
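/*
 * Sketch of the tail of a pattern this tunnel parser accepts (values are
 * illustrative only; the outer-header items are consumed earlier in the
 * function): ... / VXLAN (vni = 0x123456) or NVGRE / ETH (dst = inner
 * MAC) / VLAN / END, with VOID items allowed anywhere, since
 * NEXT_ITEM_OF_PATTERN() skips them.
 */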
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);

step_next:
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;
	return ret;
}
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr,
				entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr,
				entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr,
				entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr,
				entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr,
				entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr,
				entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
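/*
 * The parsers below are tried in a fixed order: ntuple, ethertype, SYN,
 * flow director, then L2 tunnel. The first parser that accepts the rule
 * determines the filter type used to program the hardware.
 */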
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;
			}
		}
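		/*
		 * Illustrative consequence of the single global mask (values
		 * not from the original code): once a rule with a
		 * fully-masked field, e.g. dst_addr mask 0xFFFFFFFF, has
		 * programmed the mask, a later rule using mask 0xFFFFFF00
		 * for the same field makes the memcmp above return nonzero
		 * and the creation fails.
		 */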

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret)
				goto out;
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, as there may not be enough room for the rule.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
				pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
				pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}
const struct rte_flow_ops ixgbe_flow_ops = {
	ixgbe_flow_validate,
	ixgbe_flow_create,
	ixgbe_flow_destroy,
	ixgbe_flow_flush,
};
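/*
 * Usage sketch (not part of this driver): an application reaches the
 * callbacks above through the generic rte_flow API. port_id, attr,
 * pattern and actions below are assumed to be set up by the caller.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f)
 *		rte_flow_destroy(port_id, f, &err);
 */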