/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern,
		const struct rte_flow_action *actions,
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *filter,
		struct rte_flow_error *error);
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *rule,
		struct rte_flow_error *error);
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.flush = ixgbe_flow_flush,
};
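/*
 * Note: this ops table is handed back from the ethdev filter_ctrl
 * callback when an application queries filter type RTE_ETH_FILTER_GENERIC
 * (see ixgbe_ethdev.c); that is how the generic rte_flow API reaches the
 * parsers in this file.
 */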
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {						\
		item = (pattern) + (index);		\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			(index)++;			\
			item = (pattern) + (index);	\
		}					\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {						\
		act = (actions) + (index);		\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			(index)++;			\
			act = (actions) + (index);	\
		}					\
	} while (0)
/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
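/*
 * Illustrative sketch (not part of the driver, compiled out by the
 * hypothetical IXGBE_FLOW_DOC_EXAMPLES guard): how an application might
 * build the 5-tuple rule documented above through the generic rte_flow
 * API. The addresses, ports, priority and queue index are arbitrary
 * examples, and the helper name is ours.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_ntuple_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = 1,	/* within [IXGBE_MIN,IXGBE_MAX]_N_TUPLE_PRIO */
	};
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = IPPROTO_UDP,
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		/* only src/dst address and protocol may be unmasked */
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */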
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
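/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * ethertype rule documented above as an application could build it.
 * The 0x0807 ethertype and queue 0 are arbitrary; leaving the MAC
 * address masks all-zero selects the "no MAC compare" path, which is
 * the only one this driver accepts.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_ethertype_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0807),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = UINT16_MAX,	/* ethertype must be fully masked */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */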
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
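/*
 * Illustrative sketch (not part of the driver, compiled out): a minimal
 * SYN rule. The parser above allows the ETH/IP items to be skipped
 * entirely, so the pattern can start directly with TCP; priority 0
 * selects the low-priority SYN filter. The helper name is ours.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_syn_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .tcp_flags = TCP_SYN_FLAG },	/* 0x02 */
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .tcp_flags = TCP_SYN_FLAG },	/* only SYN compared */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */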
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
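/*
 * Illustrative sketch (not part of the driver, compiled out): the E-tag
 * rule documented above. This assumes the GRP bits sit directly above
 * the 12-bit E-CID base inside rsvd_grp_ecid_b, matching the 0x3FFF mask
 * the parser below requires; the values and helper name are ours.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_etag_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec = {
		/* GRP 0x2, E-CID base 0x309 */
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };	/* pool */
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */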
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void action is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void action is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void action is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xFFFF
 *		tpid	0x8100		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
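/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * UDP/TCP/SCTP flow director pattern documented above, with an optional
 * MARK action. The MARK action also steers validation away from the
 * n-tuple path, since that parser only accepts QUEUE followed by END.
 * Values and the helper name are arbitrary examples.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_fdir_udp_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = { .src_addr = UINT32_MAX, .dst_addr = UINT32_MAX },
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */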
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					       sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		/* More than one tag is not supported. */

		/* Check if the next not void item is not vlan. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
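/*
 * Illustrative sketch (not part of the driver, compiled out): an NVGRE
 * flow director rule as the parser below expects it, including the inner
 * destination MAC item that follows the tunnel header. The remainder of
 * the inner-header handling lies beyond this excerpt; values and the
 * helper name are arbitrary examples.
 */
#ifdef IXGBE_FLOW_DOC_EXAMPLES
static struct rte_flow *
example_create_fdir_nvgre_flow(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_nvgre nvgre_spec = {
		.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x2000), /* key bit only */
		.protocol = rte_cpu_to_be_16(NVGRE_PROTOCOL),
		.tni = { 0x00, 0x32, 0x54 },
	};
	struct rte_flow_item_nvgre nvgre_mask = {
		.c_k_s_rsvd0_ver = rte_cpu_to_be_16(0x3000),
		.protocol = UINT16_MAX,
		.tni = { 0xFF, 0xFF, 0xFF },	/* TNI totally masked */
	};
	struct rte_flow_item_eth inner_eth_spec = {
		.dst = { .addr_bytes =
			{ 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 } },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst = { .addr_bytes =
			{ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* outer, untouched */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* outer, untouched */
		{ .type = RTE_FLOW_ITEM_TYPE_NVGRE,
		  .spec = &nvgre_spec, .mask = &nvgre_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner dst MAC */
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif /* IXGBE_FLOW_DOC_EXAMPLES */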
1916 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1917 const struct rte_flow_item pattern[],
1918 const struct rte_flow_action actions[],
1919 struct ixgbe_fdir_rule *rule,
1920 struct rte_flow_error *error)
1922 const struct rte_flow_item *item;
1923 const struct rte_flow_item_vxlan *vxlan_spec;
1924 const struct rte_flow_item_vxlan *vxlan_mask;
1925 const struct rte_flow_item_nvgre *nvgre_spec;
1926 const struct rte_flow_item_nvgre *nvgre_mask;
1927 const struct rte_flow_item_eth *eth_spec;
1928 const struct rte_flow_item_eth *eth_mask;
1929 const struct rte_flow_item_vlan *vlan_spec;
1930 const struct rte_flow_item_vlan *vlan_mask;
1934 rte_flow_error_set(error, EINVAL,
1935 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1936 NULL, "NULL pattern.");
1941 rte_flow_error_set(error, EINVAL,
1942 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1943 NULL, "NULL action.");
1948 rte_flow_error_set(error, EINVAL,
1949 RTE_FLOW_ERROR_TYPE_ATTR,
1950 NULL, "NULL attribute.");
1955 * Some fields may not be provided. Set spec to 0 and mask to default
1956 * value. So, we need not do anything for the not provided fields later.
1958 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1959 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1960 rule->mask.vlan_tci_mask = 0;
1966 * The first not void item should be
1967 * MAC or IPv4 or IPv6 or UDP or VxLAN.
1969 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1970 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1971 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1972 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1973 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1974 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1975 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1976 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1977 rte_flow_error_set(error, EINVAL,
1978 RTE_FLOW_ERROR_TYPE_ITEM,
1979 item, "Not supported by fdir filter");
1983 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1986 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1987 /* Only used to describe the protocol stack. */
1988 if (item->spec || item->mask) {
1989 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1990 rte_flow_error_set(error, EINVAL,
1991 RTE_FLOW_ERROR_TYPE_ITEM,
1992 item, "Not supported by fdir filter");
1995 /*Not supported last point for range*/
1997 rte_flow_error_set(error, EINVAL,
1998 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1999 item, "Not supported last point for range");
2003 /* Check if the next not void item is IPv4 or IPv6. */
2005 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2006 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2007 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2008 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009 rte_flow_error_set(error, EINVAL,
2010 RTE_FLOW_ERROR_TYPE_ITEM,
2011 item, "Not supported by fdir filter");
2017 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2018 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2019 /* Only used to describe the protocol stack. */
2020 if (item->spec || item->mask) {
2021 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022 rte_flow_error_set(error, EINVAL,
2023 RTE_FLOW_ERROR_TYPE_ITEM,
2024 item, "Not supported by fdir filter");
2027 /*Not supported last point for range*/
2029 rte_flow_error_set(error, EINVAL,
2030 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2031 item, "Not supported last point for range");
2035 /* Check if the next not void item is UDP or NVGRE. */
2037 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2038 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2039 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2040 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2041 rte_flow_error_set(error, EINVAL,
2042 RTE_FLOW_ERROR_TYPE_ITEM,
2043 item, "Not supported by fdir filter");
2049 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2050 /* Only used to describe the protocol stack. */
2051 if (item->spec || item->mask) {
2052 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2053 rte_flow_error_set(error, EINVAL,
2054 RTE_FLOW_ERROR_TYPE_ITEM,
2055 item, "Not supported by fdir filter");
2058 /*Not supported last point for range*/
2060 rte_flow_error_set(error, EINVAL,
2061 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2062 item, "Not supported last point for range");
2066 /* Check if the next not void item is VxLAN. */
2068 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2069 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2070 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2071 rte_flow_error_set(error, EINVAL,
2072 RTE_FLOW_ERROR_TYPE_ITEM,
2073 item, "Not supported by fdir filter");
2078 /* Get the VxLAN info */
2079 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2080 rule->ixgbe_fdir.formatted.tunnel_type =
2081 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2083 /* Only care about VNI, others should be masked. */
2085 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2086 rte_flow_error_set(error, EINVAL,
2087 RTE_FLOW_ERROR_TYPE_ITEM,
2088 item, "Not supported by fdir filter");
2091 /*Not supported last point for range*/
2093 rte_flow_error_set(error, EINVAL,
2094 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2095 item, "Not supported last point for range");
2098 rule->b_mask = TRUE;
2100 /* Tunnel type is always meaningful. */
2101 rule->mask.tunnel_type_mask = 1;
2104 (const struct rte_flow_item_vxlan *)item->mask;
2105 if (vxlan_mask->flags) {
2106 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2107 rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_ITEM,
2109 item, "Not supported by fdir filter");
2112 /* VNI must be totally masked or not. */
2113 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2114 vxlan_mask->vni[2]) &&
2115 ((vxlan_mask->vni[0] != 0xFF) ||
2116 (vxlan_mask->vni[1] != 0xFF) ||
2117 (vxlan_mask->vni[2] != 0xFF))) {
2118 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2119 rte_flow_error_set(error, EINVAL,
2120 RTE_FLOW_ERROR_TYPE_ITEM,
2121 item, "Not supported by fdir filter");
2125 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2126 RTE_DIM(vxlan_mask->vni));
2127 rule->mask.tunnel_id_mask <<= 8;
2130 rule->b_spec = TRUE;
2131 vxlan_spec = (const struct rte_flow_item_vxlan *)
2133 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2134 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2135 rule->ixgbe_fdir.formatted.tni_vni <<= 8;

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* The TNI must be fully masked or not masked at all. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		    (nvgre_mask->tni[1] != 0xFF) ||
		    (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* The TNI is a 24-bit field; shift it into place. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0,
				       sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* The TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
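
	/*
	 * For reference, assuming the standard GRE flag layout (RFC 2890,
	 * reused by NVGRE per RFC 7637): C, K and S are bits 0x8000,
	 * 0x2000 and 0x1000 of the big-endian c_k_s_rsvd0_ver half-word,
	 * so the 0x3000 mask above pins K and S, and the 0x2000 spec
	 * value means "key present, no sequence number", which is what
	 * NVGRE mandates. An illustrative testpmd rule (not part of the
	 * driver):
	 *
	 *   flow create 0 ingress pattern eth / ipv4 /
	 *     nvgre tni is 0x12 / eth dst is 00:11:22:33:44:55 / end
	 *     actions queue index 1 / end
	 */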

	/* Check if the next not void item is ETH (the inner MAC). */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only the VLAN and the dst MAC address are supported,
	 * other fields should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* The src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When there is no VLAN item, the TCI is considered fully masked. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
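
	/*
	 * Note: mac_addr_byte_mask built above is a per-byte bitmap, bit j
	 * standing for address byte j; e.g. a mask of ff:ff:ff:ff:ff:ff
	 * yields 0x3F (compare all six bytes), while ff:ff:ff:00:00:00
	 * yields 0x07 (compare the OUI only).
	 */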

	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;

		/**
		 * More than one VLAN tag is not supported, so the next
		 * not void item must be END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN TCI is 0, the VLAN is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}

static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
				rule, error);
	if (ret)
		return ret;

	/* The rule mode must match the FDIR mode the port is configured with. */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	return 0;
}
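
/*
 * Dispatch helper below: the non-tunnel syntax is tried first and the
 * VxLAN/NVGRE tunnel syntax is used as a fallback; whichever parser
 * succeeds records its mode in rule->mode for the caller to check.
 */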
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		return 0;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);

	return ret;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it does not guarantee that the rule can
 * be programmed into the HW, as there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	/* Try each parser in turn; the first one that accepts
	 * the rule determines its type.
	 */
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
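
/*
 * Illustrative application-side usage (not part of this file): the
 * handler above is reached through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */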

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");