/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern,
		const struct rte_flow_action *actions,
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *filter,
		struct rte_flow_error *error);
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *rule,
		struct rte_flow_error *error);
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.flush = ixgbe_flow_flush,
};
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define NEXT_ITEM_OF_PATTERN(item, pattern, index) \
	do { \
		item = pattern + index; \
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) { \
			index++; \
			item = pattern + index; \
		} \
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index) \
	do { \
		act = actions + index; \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
			index++; \
			act = actions + index; \
		} \
	} while (0)
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item fields use big endian, while rte_flow_attr and
 * rte_flow_action fields use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
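/*
 * Illustrative sketch (not part of the driver): an application filling a
 * pattern item converts multi-byte fields to network order, e.g.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *	};
 *
 * while attr/action fields such as a queue index stay in CPU order.
 */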
/**
 * Parse the rule to see if it is a n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
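/*
 * Illustrative usage sketch (hypothetical application code; port_id and a
 * configured queue 0 are assumed, this is not driver API):
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = 17,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(UINT32_MAX),
 *			.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .src_port = rte_cpu_to_be_16(80),
 *			 .dst_port = rte_cpu_to_be_16(80) },
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */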
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for ixgbe because its flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
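/*
 * Illustrative sketch (hypothetical application code): matching
 * ethertype 0x0807 and steering it to queue 0.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */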
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}
	/* parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
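/*
 * Illustrative sketch (hypothetical application code): only the SYN bit
 * is matched, so spec/mask carry just tcp_flags.
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = 0x02 };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = 0xFF };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */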
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);
	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is a L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
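/*
 * Illustrative sketch (hypothetical application code): the 2-bit GRP and
 * 12-bit e_cid_base travel together in rsvd_grp_ecid_b, big endian.
 *
 *	struct rte_flow_item_e_tag etag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag etag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 */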
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
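	/*
	 * Worked example (editorial note, not from the original source):
	 * grp 0x1 with e_cid_base 0x309 arrives as big-endian 0x1309 in
	 * rsvd_grp_ecid_b, so rte_be_to_cpu_16() stores 0x1309, the 14-bit
	 * value the hardware consumes, in tunnel_id.
	 */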
	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}
	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			    struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				      actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xFFFF
 *		tpid	0x8100		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
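/*
 * Illustrative sketch (hypothetical application code) of the action list
 * this parser accepts: queue 1 plus an optional MARK id.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */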
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 */
	}

	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	} else {
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		/* More than one tag is not supported. */

		/**
		 * Check if the next not void item is not vlan.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
#define NVGRE_PROTOCOL 0x6558
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
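/*
 * Illustrative sketch (hypothetical application code) of the VxLAN form:
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */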
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
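	/*
	 * Editorial note on the shifts above and below: the 24-bit VNI/TNI
	 * bytes are copied into the low three bytes of a 32-bit field, and
	 * the shift by 8 left-aligns them into the byte positions the rest
	 * of the driver assumes.
	 */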
	/* Get the NVGRE info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI;
		 * everything else must be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Range ("last") is not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* The TNI must be totally masked or totally unmasked. */
		if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
		     nvgre_mask->tni[2]) &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* The TNI is a 24-bit field; keep it in the upper bytes. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;
		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* The TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
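	/*
	 * Illustrative NVGRE spec/mask pair accepted by the checks above (a
	 * hedged sketch with made-up TNI bytes): key bit set in the spec
	 * (0x2000), K/S/version bits masked (0x3000), Transparent Ethernet
	 * Bridging protocol, TNI fully masked.
	 *
	 *	struct rte_flow_item_nvgre nvgre_spec = { 0 }, nvgre_mask = { 0 };
	 *
	 *	nvgre_spec.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000);
	 *	nvgre_spec.protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL);
	 *	memcpy(nvgre_spec.tni, "\x0a\x0b\x0c", 3);
	 *	nvgre_mask.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000);
	 *	nvgre_mask.protocol = 0xFFFF;
	 *	memset(nvgre_mask.tni, 0xFF, 3);
	 */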
	/* Check if the next not void item is MAC. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/**
	 * Only the VLAN and the dst MAC address are supported;
	 * everything else must be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Range ("last") is not supported. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* The Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* The src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask: each byte is all-ones or all-zeros. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
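	/*
	 * Illustration (hedged): a dst MAC mask of ff:ff:ff:ff:ff:00 sets
	 * bits 0-4 in the loop above, i.e. mac_addr_byte_mask == 0x1F;
	 * any mask byte that is neither 0x00 nor 0xFF has been rejected.
	 */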
	/* When there is no VLAN item, treat the TCI as fully masked. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Range ("last") is not supported. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		/* The TPID must be totally masked. */
		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		/* More than one VLAN tag is not supported. */

		/* Check that the next not void item is not VLAN. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
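	/*
	 * Illustrative VLAN item accepted above (a hedged sketch with a
	 * made-up TCI): the TPID 0x8100 fully masked, the TCI matched
	 * under the 0xEFFF mask.
	 *
	 *	struct rte_flow_item_vlan vlan_spec = { 0 }, vlan_mask = { 0 };
	 *
	 *	vlan_spec.tpid = rte_cpu_to_be_16(ETHER_TYPE_VLAN);
	 *	vlan_spec.tci = rte_cpu_to_be_16(0x0123);
	 *	vlan_mask.tpid = 0xFFFF;
	 *	vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);
	 */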
	/**
	 * If the VLAN TCI mask is 0, the VLAN is treated as "don't care".
	 * Do nothing in that case.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
				rule, error);
	if (ret)
		return ret;

	/* The port must be configured in the mode the rule needs. */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	return 0;
}
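/*
 * Note (a hedged sketch; "port_id", "nb_rxq" and "nb_txq" are the caller's,
 * not part of this driver): a rule only passes the mode check above when
 * the port was configured with the flow director mode the rule needs,
 * e.g. for tunnel rules:
 *
 *	struct rte_eth_conf port_conf = { 0 };
 *
 *	port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 */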
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	/* Try the "normal" (non-tunnel) syntax first. */
	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		return 0;

	/* Fall back to the tunnel (VxLAN/NVGRE) syntax. */
	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);

	return ret;
}
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let the rule use the first filter type it hits,
 * so the sequence of the parsers below matters.
 * (An application-side usage sketch follows the function body.)
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}
	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}
	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted once it is programmed. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only one global mask is supported;
				 * all the masks must be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;
			}
		}
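		/*
		 * Illustration (hedged): the flow director has one global
		 * input mask, so if an existing rule fully masked a field
		 * (e.g. 0xFFFF on a port number) and a new rule masks it
		 * differently (e.g. 0xFF00), the memcmp() above fails and
		 * the new rule is rejected.
		 */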
		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			goto out;
		}

		goto out;
	}
	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = cons_parse_l2_tn_filter(attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}
out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
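/*
 * Illustrative application-side usage of the create path above (a hedged
 * sketch; "port_id", "pattern" and "actions" are the caller's, not part of
 * this driver): because the parsers run in the order ntuple -> ethertype ->
 * SYN -> fdir -> L2 tunnel, a rule is bound to the first filter type whose
 * parser accepts it.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */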
/**
 * Check whether the flow rule is supported by ixgbe.
 * It only checks the format; it does not guarantee the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	return 0;
}
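/*
 * Illustrative application-side teardown (a hedged sketch; "port_id" is the
 * caller's): one flush call drops every rule created through this API on
 * the port.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n",
 *		       err.message ? err.message : "unknown");
 */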