/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error);
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error);
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *rule,
			struct rte_flow_error *error);
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
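/*
 * Editor's note: this table is what the generic rte_flow entry points
 * (rte_flow_validate/create/destroy/flush) dispatch to. The driver is
 * expected to hand it back from its filter_ctrl callback when queried
 * with RTE_ETH_FILTER_GENERIC.
 */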
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
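/*
 * Usage sketch (editor's addition, not driver code): the "pattern example"
 * in the comment above maps to an item array like the following, which an
 * application could pass to rte_flow_validate(). All addresses and ports
 * are illustrative assumptions.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = rte_cpu_to_be_32(0xC0A80114),   (192.168.1.20)
 *		.dst_addr = rte_cpu_to_be_32(0xC0A70332),   (192.167.3.50)
 *		.next_proto_id = 17,                        (UDP)
 *	} };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = UINT32_MAX,
 *		.dst_addr = UINT32_MAX,
 *		.next_proto_id = 0xFF,
 *	} };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = rte_cpu_to_be_16(80),
 *		.dst_port = rte_cpu_to_be_16(80),
 *	} };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = UINT16_MAX,
 *		.dst_port = UINT16_MAX,
 *	} };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *
 * followed by a single QUEUE action and an END action.
 */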
/* a dedicated function for ixgbe because its filter flags are specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 *		tpid	0x8100		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tags are not supported. */

		/* Check if the next not void item is not vlan. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
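/*
 * Note (editor's addition): rule->mode ends up as RTE_FDIR_MODE_PERFECT
 * for the plain IPv4/L4 pattern and RTE_FDIR_MODE_PERFECT_MAC_VLAN as
 * soon as the ETH item carries a spec or mask; ixgbe_validate_fdir_filter()
 * is then expected to reject rules whose mode does not match the mode the
 * FDIR engine was configured with.
 */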
#define NVGRE_PROTOCOL 0x6558
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bits bit field */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bits bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* Check if the next not void item is MAC. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported: last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
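	/*
	 * Example (informational): a dst MAC mask of ff:ff:ff:ff:ff:ff
	 * yields mac_addr_byte_mask == 0x3F, one bit per fully masked
	 * address byte.
	 */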
	/* When no vlan, considered as full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported: last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is not vlan. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/**
	 * If there is no VLAN tag, the VLAN field is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
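/*
 * Informational sketch (not in the original source): a pattern this parser
 * accepts looks like
 *	ETH / IPV4 / UDP / VXLAN / ETH / VLAN / END
 * or
 *	ETH / IPV4 / NVGRE / ETH / VLAN / END
 * where only the VNI/TNI, the inner destination MAC and the inner VLAN TCI
 * carry a spec; the outer headers merely describe the protocol stack.
 */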
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ret = ixgbe_parse_fdir_filter(attr, pattern, actions,
				      rule, error);
	if (ret)
		return ret;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	return 0;
}
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					     actions, rule, error);
	if (!ret)
		return 0;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);

	return ret;
}
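/*
 * Note (informational): the non-tunnel parser runs first, so a pattern that
 * both parsers could accept is always treated as a non-tunnel rule.
 */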
static void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			     ixgbe_flow_mem_ptr, entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
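/*
 * Usage sketch (informational, not part of the original source); port_id
 * stands for the application's port identifier:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow creation failed: %s\n", err.message);
 *
 * The branches below are tried in order; the first filter type whose parser
 * accepts the rule is the one programmed into the hardware.
 */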
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
			  ixgbe_flow_mem_ptr, entries);
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}
	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}
	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}
		}

		goto out;
	}
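	/*
	 * Note (informational): the FDIR mask is global.  The first rule's
	 * mask is programmed into hardware and mask_added is latched; every
	 * later rule must carry an identical mask, or the memcmp() above
	 * fails and creation is aborted.
	 */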
	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(attr, pattern,
				actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}
out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
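/*
 * Usage sketch (informational, not part of the original source):
 *
 *	struct rte_flow_error err;
 *	int rc = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 *
 * A zero return code only means the rule parses; rte_flow_create() can
 * still fail later for lack of hardware resources.
 */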
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
				    pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
				       pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
				 pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
				   pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}
	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;
	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}