/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.flush = ixgbe_flow_flush,
};
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7

/* Advance "item" to the next non-VOID entry of the pattern. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

/* Advance "act" to the next non-VOID entry of the action list. */
#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item fields use big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network (big endian) order.
 */
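
/*
 * A minimal sketch (not part of the original file) of what that convention
 * means for an application: header fields in a pattern item are converted
 * to big endian, while action fields such as the queue index stay in CPU
 * order. The port number and queue index here are arbitrary.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),	// network order
 *	};
 *	struct rte_flow_action_queue queue = {
 *		.index = 3,				// CPU order
 *	};
 */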
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info if it is.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
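
/*
 * A minimal sketch (illustrative only, not part of the original file) of a
 * pattern/action list matching the layout documented above; ipv4_spec,
 * ipv4_mask, udp_spec, udp_mask and queue are assumed to be filled in by
 * the application:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	// spec/mask NULL
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ipv4_spec, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */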
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* A specific function for ixgbe because its flags are specific. */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info if it is.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
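
/*
 * A minimal sketch (illustrative only, not part of the original file) of a
 * matching ethertype rule; 0x0807 is the arbitrary example type used above:
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */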
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}
	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}
	/* Check if the next non-void action is END. */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}
	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address compare. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "queue index too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);

	return ret;
}
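
/*
 * A sketch (illustrative only) of how an application reaches this handler
 * through the generic flow API; port_id, attr, pattern and actions are
 * assumed to be set up as in the examples above:
 *
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *	if (ret)
 *		printf("rule rejected: %s\n", err.message);
 */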
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	return 0;
}
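
/*
 * A sketch (illustrative only) of the application-side call that ends up
 * here; port_id is assumed to be a valid ixgbe port:
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_flush(port_id, &err))
 *		printf("flush failed: %s\n", err.message);
 */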