/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern,
		const struct rte_flow_action *actions,
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.flush = ixgbe_flow_flush,
};
#define IXGBE_MIN_N_TUPLE_PRIO	1
#define IXGBE_MAX_N_TUPLE_PRIO	7

/* Advance to the next non-VOID item/action, skipping any VOID entries. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {\
		item = (pattern) + (index);\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			(index)++;\
			item = (pattern) + (index);\
		}\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {\
		act = (actions) + (index);\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;\
			act = (actions) + (index);\
		}\
	} while (0)
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally be in network order.
 */
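
/*
 * Illustrative sketch only (not driver code; the function name is
 * hypothetical): pattern spec/mask fields are given in network byte
 * order, while attribute and action fields stay in CPU order.
 */
static __rte_unused void
example_byte_order_sketch(struct rte_flow_item_udp *udp_spec,
			  struct rte_flow_action_queue *queue_conf)
{
	/* pattern field: convert the CPU-order literal to big endian */
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	/* action field: plain CPU order, no conversion */
	queue_conf->index = 1;
}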
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (An illustrative pattern/action sketch follows this parser below.)
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and TCP flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
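
/*
 * Illustrative sketch only (not driver code; the function name and all
 * values are hypothetical): building the pattern/action arrays in the
 * layout documented above for cons_parse_ntuple_filter(). Together with
 * an attr whose ingress bit is set, the arrays could then be passed to
 * rte_flow_validate().
 */
static __rte_unused void
example_ntuple_rule_sketch(struct rte_flow_item pattern[3],
			   struct rte_flow_action actions[2],
			   struct rte_flow_item_ipv4 *ipv4_spec,
			   struct rte_flow_item_ipv4 *ipv4_mask,
			   struct rte_flow_item_udp *udp_spec,
			   struct rte_flow_item_udp *udp_mask,
			   struct rte_flow_action_queue *queue)
{
	memset(ipv4_spec, 0, sizeof(*ipv4_spec));
	memset(ipv4_mask, 0, sizeof(*ipv4_mask));
	memset(udp_spec, 0, sizeof(*udp_spec));
	memset(udp_mask, 0, sizeof(*udp_mask));

	/* IPv4: match src/dst address and the L4 protocol exactly */
	ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	ipv4_spec->hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
	ipv4_spec->hdr.next_proto_id = IPPROTO_UDP;
	ipv4_mask->hdr.src_addr = UINT32_MAX;
	ipv4_mask->hdr.dst_addr = UINT32_MAX;
	ipv4_mask->hdr.next_proto_id = UINT8_MAX;

	/* UDP: match both ports exactly */
	udp_spec->hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask->hdr.src_port = UINT16_MAX;
	udp_mask->hdr.dst_port = UINT16_MAX;

	pattern[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = ipv4_spec, .mask = ipv4_mask,
	};
	pattern[1] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = udp_spec, .mask = udp_mask,
	};
	pattern[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };

	queue->index = 1;
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue,
	};
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}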
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* ixgbe doesn't support TCP flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* ixgbe doesn't support many priorities */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;

	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (An illustrative pattern/action sketch follows this parser below.)
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first non-void item should be MAC */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* get the MAC info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first non-void action is QUEUE or DROP */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
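
/*
 * Illustrative sketch only (not driver code; the function name and the
 * EtherType/queue values are hypothetical): an ETH item matching
 * EtherType 0x0807 exactly, with both MAC address masks left all-zero,
 * as documented above.
 */
static __rte_unused void
example_ethertype_rule_sketch(struct rte_flow_item pattern[2],
			      struct rte_flow_action actions[2],
			      struct rte_flow_item_eth *eth_spec,
			      struct rte_flow_item_eth *eth_mask,
			      struct rte_flow_action_queue *queue)
{
	memset(eth_spec, 0, sizeof(*eth_spec));
	memset(eth_mask, 0, sizeof(*eth_mask));

	/* EtherType in network byte order; its mask must be all ones */
	eth_spec->type = rte_cpu_to_be_16(0x0807);
	eth_mask->type = UINT16_MAX;

	pattern[0] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = eth_spec, .mask = eth_mask,
	};
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };

	queue->index = 1;
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue,
	};
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}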
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* ixgbe doesn't support MAC address matching */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 * (An illustrative pattern/action sketch follows this parser below.)
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* get the TCP info; only support SYN */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/* not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* support 2 priorities, the lowest or highest */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
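
/*
 * Illustrative sketch only (not driver code; the function name and the
 * queue index are hypothetical): the ETH/IPV4/TCP/END layout documented
 * above, with only the TCP SYN flag specified and every other TCP field
 * masked out.
 */
static __rte_unused void
example_syn_rule_sketch(struct rte_flow_item pattern[4],
			struct rte_flow_action actions[2],
			struct rte_flow_item_tcp *tcp_spec,
			struct rte_flow_item_tcp *tcp_mask,
			struct rte_flow_action_queue *queue)
{
	memset(tcp_spec, 0, sizeof(*tcp_spec));
	memset(tcp_mask, 0, sizeof(*tcp_mask));
	tcp_spec->hdr.tcp_flags = TCP_SYN_FLAG;
	tcp_mask->hdr.tcp_flags = TCP_SYN_FLAG;

	/* ETH and IPV4 items carry no spec/mask: any L2/L3 header matches */
	pattern[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
	pattern[1] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	pattern[2] = (struct rte_flow_item){
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.spec = tcp_spec, .mask = tcp_mask,
	};
	pattern[3] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };

	queue->index = 1;
	actions[0] = (struct rte_flow_action){
		.type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = queue,
	};
	actions[1] = (struct rte_flow_action){ .type = RTE_FLOW_ACTION_TYPE_END };
}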
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	return 0;
}
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);

	return ret;
}
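
/*
 * Illustrative sketch only (not driver code; the function name is
 * hypothetical): an application reaches ixgbe_flow_validate() through
 * the generic rte_flow API. The attr, pattern and actions arrays are
 * assumed to be built as in the sketches above.
 */
static __rte_unused int
example_validate_sketch(uint8_t port_id,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* returns 0 when the rule format is accepted by this PMD */
	return rte_flow_validate(port_id, attr, pattern, actions, &err);
}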
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	return 0;
}
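
/*
 * Illustrative sketch only (not driver code; the function name is
 * hypothetical): applications trigger ixgbe_flow_flush() through the
 * generic rte_flow API.
 */
static __rte_unused int
example_flush_sketch(uint8_t port_id)
{
	struct rte_flow_error err;

	/* drops every flow rule installed on the port */
	return rte_flow_flush(port_id, &err);
}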