/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
static int ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ntuple_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item *pattern,
		const struct rte_flow_action *actions,
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_ethertype_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_syn_filter *filter,
		struct rte_flow_error *error);
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *filter,
		struct rte_flow_error *error);
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_eth_l2_tunnel_conf *rule,
		struct rte_flow_error *error);
static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct ixgbe_fdir_rule *rule,
		struct rte_flow_error *error);
static int
ixgbe_flow_validate(__rte_unused struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *ixgbe_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
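/*
 * Note (editorial): this table is what the ethdev layer hands back to
 * applications that query the generic flow API through filter_ctrl;
 * rte_flow_validate()/rte_flow_create()/rte_flow_destroy()/rte_flow_flush()
 * then land on the callbacks above.
 */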
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7

/* Advance *item* to the next non-VOID entry of *pattern*, from *index*. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)		\
	do {							\
		item = pattern + index;				\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {	\
			index++;				\
			item = pattern + index;			\
		}						\
	} while (0)

/* Advance *act* to the next non-VOID entry of *actions*, from *index*. */
#define NEXT_ITEM_OF_ACTION(act, actions, index)		\
	do {							\
		act = actions + index;				\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;				\
			act = actions + index;			\
		}						\
	} while (0)
/**
 * Please be aware there's an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
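/*
 * For example (illustrative only), a port carried in an item spec is
 * given in network order, while attribute fields stay in CPU order:
 *
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);	(pattern: big endian)
 *	attr.priority = 1;				(attr: CPU order)
 */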
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after this function for one way an
 * application can build such a rule.)
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
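/*
 * Illustrative sketch only (not referenced by the driver): one way an
 * application could build a pattern/action list that the n-tuple parser
 * above accepts. The helper name and the addresses/ports are invented
 * for the example.
 */
static __rte_unused struct rte_flow *
example_create_ntuple_flow(uint8_t port_id, uint16_t rx_queue,
			   struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = IPPROTO_UDP,
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	/* The ETH item carries no spec/mask: the parser requires it empty. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}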
/* a function specific to ixgbe, because the flags field is device-specific */
static int
ixgbe_parse_ntuple_filter(const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after this function.)
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
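/*
 * Illustrative sketch only (not referenced by the driver): a minimal
 * pattern/action list accepted by the ethertype parser above. The helper
 * name and the 0x0807 ethertype are invented for the example.
 */
static __rte_unused struct rte_flow *
example_create_ethertype_flow(uint8_t port_id, uint16_t rx_queue,
			      struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0807),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = UINT16_MAX,	/* the ethertype must be fully masked */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}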
static int
ixgbe_parse_ethertype_filter(const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address compare. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index is too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after this function.)
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
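/*
 * Illustrative sketch only (not referenced by the driver): a rule that
 * directs TCP SYN packets to a given queue, matching what the parser
 * above accepts. The helper name is invented; 0x02 is the TCP SYN bit.
 */
static __rte_unused struct rte_flow *
example_create_syn_flow(uint8_t port_id, uint16_t rx_queue,
			struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };	/* priority 0: low */
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .tcp_flags = 0x02 },	/* SYN set in the spec */
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .tcp_flags = 0x02 },	/* only the SYN bit matched */
	};
	/* ETH and IPV4 items carry no spec/mask, as the parser requires. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}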
static int
ixgbe_parse_syn_filter(const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * (See the illustrative sketch after this function.)
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
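/*
 * Illustrative sketch only (not referenced by the driver): an E-tag rule
 * matching GRP 0x1 / E-CID base 0x309, as in the comment above. The
 * helper name and the values are invented for the example; the bit
 * layout of rsvd_grp_ecid_b (2b reserved, 2b GRP, 12b E-CID base) is
 * taken from the rte_flow E-tag item definition.
 */
static __rte_unused struct rte_flow *
example_create_l2_tn_flow(uint8_t port_id, uint16_t pool,
			  struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec = {
		/* GRP = 0x1, E-CID base = 0x309, big endian on the wire */
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		/* the parser requires exactly this mask */
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = pool };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}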
static int
ixgbe_validate_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xFFFF
 *		tpid	0x8100		0xFFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 * (See the illustrative sketch after this function.)
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is not vlan. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
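/*
 * Illustrative sketch only (not referenced by the driver): a perfect-mode
 * flow director rule matching the UDP example in the comment above, with
 * a MARK action so matched packets carry a software id (the MARK also
 * keeps the simpler parsers from accepting the rule first). The helper
 * name and all values are invented for the example.
 */
static __rte_unused struct rte_flow *
example_create_fdir_flow(uint8_t port_id, uint16_t rx_queue,
			 struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		},
	};
	struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}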
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xFFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 * (A sketch of the VxLAN case, in code, follows this comment.)
 */
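/*
 * Illustrative sketch only: building the VxLAN example above in code
 * (all values invented for the example). The outer ETH/IPV4/UDP items
 * carry no spec/mask; the inner ETH item matches the dst MAC:
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tpid = rte_cpu_to_be_16(0x8100),
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tpid = 0xFFFF, .tci = 0xFFFF,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */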
1924 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
1925 const struct rte_flow_item pattern[],
1926 const struct rte_flow_action actions[],
1927 struct ixgbe_fdir_rule *rule,
1928 struct rte_flow_error *error)
1930 const struct rte_flow_item *item;
1931 const struct rte_flow_item_vxlan *vxlan_spec;
1932 const struct rte_flow_item_vxlan *vxlan_mask;
1933 const struct rte_flow_item_nvgre *nvgre_spec;
1934 const struct rte_flow_item_nvgre *nvgre_mask;
1935 const struct rte_flow_item_eth *eth_spec;
1936 const struct rte_flow_item_eth *eth_mask;
1937 const struct rte_flow_item_vlan *vlan_spec;
1938 const struct rte_flow_item_vlan *vlan_mask;
1942 rte_flow_error_set(error, EINVAL,
1943 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1944 NULL, "NULL pattern.");
1949 rte_flow_error_set(error, EINVAL,
1950 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1951 NULL, "NULL action.");
1956 rte_flow_error_set(error, EINVAL,
1957 RTE_FLOW_ERROR_TYPE_ATTR,
1958 NULL, "NULL attribute.");
1963 * Some fields may not be provided. Set spec to 0 and mask to default
1964 * value. So, we need not do anything for the not provided fields later.
1966 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1967 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1968 rule->mask.vlan_tci_mask = 0;
1974 * The first not void item should be
1975 * MAC or IPv4 or IPv6 or UDP or VxLAN.
1977 NEXT_ITEM_OF_PATTERN(item, pattern, index);
1978 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1979 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1980 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1981 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1982 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
1983 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
1984 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1985 rte_flow_error_set(error, EINVAL,
1986 RTE_FLOW_ERROR_TYPE_ITEM,
1987 item, "Not supported by fdir filter");
1991 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
1994 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1995 /* Only used to describe the protocol stack. */
1996 if (item->spec || item->mask) {
1997 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1998 rte_flow_error_set(error, EINVAL,
1999 RTE_FLOW_ERROR_TYPE_ITEM,
2000 item, "Not supported by fdir filter");
2003 /*Not supported last point for range*/
2005 rte_flow_error_set(error, EINVAL,
2006 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2007 item, "Not supported last point for range");
2011 /* Check if the next not void item is IPv4 or IPv6. */
2013 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2014 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2015 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2016 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2017 rte_flow_error_set(error, EINVAL,
2018 RTE_FLOW_ERROR_TYPE_ITEM,
2019 item, "Not supported by fdir filter");
2025 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2026 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2027 /* Only used to describe the protocol stack. */
2028 if (item->spec || item->mask) {
2029 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2030 rte_flow_error_set(error, EINVAL,
2031 RTE_FLOW_ERROR_TYPE_ITEM,
2032 item, "Not supported by fdir filter");
2035 /*Not supported last point for range*/
2037 rte_flow_error_set(error, EINVAL,
2038 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2039 item, "Not supported last point for range");
2043 /* Check if the next not void item is UDP or NVGRE. */
2045 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2046 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2047 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2048 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2049 rte_flow_error_set(error, EINVAL,
2050 RTE_FLOW_ERROR_TYPE_ITEM,
2051 item, "Not supported by fdir filter");
2057 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2058 /* Only used to describe the protocol stack. */
2059 if (item->spec || item->mask) {
2060 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2061 rte_flow_error_set(error, EINVAL,
2062 RTE_FLOW_ERROR_TYPE_ITEM,
2063 item, "Not supported by fdir filter");
2066 /*Not supported last point for range*/
2068 rte_flow_error_set(error, EINVAL,
2069 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2070 item, "Not supported last point for range");
2074 /* Check if the next not void item is VxLAN. */
2076 NEXT_ITEM_OF_PATTERN(item, pattern, index);
2077 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2078 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2079 rte_flow_error_set(error, EINVAL,
2080 RTE_FLOW_ERROR_TYPE_ITEM,
2081 item, "Not supported by fdir filter");
2086 /* Get the VxLAN info */
2087 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2088 rule->ixgbe_fdir.formatted.tunnel_type =
2089 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2091 /* Only care about VNI, others should be masked. */
2093 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2094 rte_flow_error_set(error, EINVAL,
2095 RTE_FLOW_ERROR_TYPE_ITEM,
2096 item, "Not supported by fdir filter");
2099 /*Not supported last point for range*/
2101 rte_flow_error_set(error, EINVAL,
2102 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2103 item, "Not supported last point for range");
2106 rule->b_mask = TRUE;
2108 /* Tunnel type is always meaningful. */
2109 rule->mask.tunnel_type_mask = 1;
2112 (const struct rte_flow_item_vxlan *)item->mask;
2113 if (vxlan_mask->flags) {
2114 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115 rte_flow_error_set(error, EINVAL,
2116 RTE_FLOW_ERROR_TYPE_ITEM,
2117 item, "Not supported by fdir filter");
2120 /* VNI must be totally masked or not. */
2121 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2122 vxlan_mask->vni[2]) &&
2123 ((vxlan_mask->vni[0] != 0xFF) ||
2124 (vxlan_mask->vni[1] != 0xFF) ||
2125 (vxlan_mask->vni[2] != 0xFF))) {
2126 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2127 rte_flow_error_set(error, EINVAL,
2128 RTE_FLOW_ERROR_TYPE_ITEM,
2129 item, "Not supported by fdir filter");
2133 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2134 RTE_DIM(vxlan_mask->vni));
2135 rule->mask.tunnel_id_mask <<= 8;
2138 rule->b_spec = TRUE;
2139 vxlan_spec = (const struct rte_flow_item_vxlan *)
2141 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2142 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2143 rule->ixgbe_fdir.formatted.tni_vni <<= 8;

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or totally unmasked. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI is a 24-bit field. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
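
	/*
	 * Note: for NVGRE the GRE header bits are pinned. The mask must cover
	 * exactly the key-present and sequence bits (0x3000) plus the whole
	 * protocol field, and the spec must have only the key-present bit set
	 * (0x2000, i.e. a key with no sequence number) with the protocol equal
	 * to NVGRE_PROTOCOL (Transparent Ethernet Bridging, 0x6558).
	 */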

	/* Check if the next not void item is MAC. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per-byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no VLAN item is given, this is considered a full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
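
	/*
	 * Note: mac_addr_byte_mask holds one bit per address byte rather than
	 * a bitwise MAC mask. For example, a dst mask of ff:ff:ff:ff:ff:ff
	 * yields 0x3F, while ff:ff:ff:ff:00:00 yields 0x0F.
	 */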

	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		if (vlan_spec->tpid != rte_cpu_to_be_16(ETHER_TYPE_VLAN)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		if (vlan_mask->tpid != (uint16_t)~0U) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.vlan_tci_mask = vlan_mask->tci;
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is not vlan. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		} else if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN TCI mask is 0, the VLAN is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
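
/*
 * Illustrative example (not part of the driver): a pattern the tunnel parser
 * above accepts, written in testpmd-like notation. The concrete spec/mask
 * values are hypothetical; the VNI mask must be all-0xFF and the inner
 * Ethernet mask may cover only the destination MAC and the VLAN:
 *
 *	eth / ipv4 / udp / vxlan vni spec 0x123456 vni mask 0xffffff /
 *	eth dst spec 00:11:22:33:44:55 dst mask ff:ff:ff:ff:ff:ff /
 *	vlan tpid spec 0x8100 tpid mask 0xffff / end
 *
 * The actions and attributes are then validated by
 * ixgbe_parse_fdir_act_attr(), e.g. a single QUEUE action.
 */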

static int
ixgbe_validate_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	ret = ixgbe_parse_fdir_filter(attr, pattern, actions, rule, error);
	if (ret)
		return ret;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	return 0;
}

static int
ixgbe_parse_fdir_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		return 0;

	return ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);
}
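
/*
 * Release every element on the driver's filter lists, including the rte_flow
 * handles remembered in ixgbe_flow_list. This frees only the software
 * bookkeeping; the corresponding hardware entries are cleared separately
 * (see ixgbe_flow_flush() below).
 */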
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
2566 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2567 ret = ixgbe_parse_ethertype_filter(attr, pattern,
2568 actions, ðertype_filter, error);
2570 ret = ixgbe_add_del_ethertype_filter(dev,
2571 ðertype_filter, TRUE);
2573 ethertype_filter_ptr = rte_zmalloc(
2574 "ixgbe_ethertype_filter",
2575 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2576 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2578 sizeof(struct rte_eth_ethertype_filter));
2579 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2580 ethertype_filter_ptr, entries);
2581 flow->rule = ethertype_filter_ptr;
2582 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = cons_parse_syn_filter(attr, pattern, actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;
				return flow;
			}
			goto out;
		}

		goto out;
	}
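
	/*
	 * Design note: Flow Director has a single, global input mask per
	 * port. The first rule therefore programs the mask into hardware,
	 * and later rules are accepted only if they carry an identical
	 * mask; this is what the b_mask handling above enforces.
	 */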

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = cons_parse_l2_tn_filter(attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
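
/*
 * Illustrative usage from an application (not part of this driver); the
 * attr/pattern/actions arrays are whatever one of the parsers above accepts:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow rejected: %s\n",
 *		       err.message ? err.message : "(no message)");
 *	else
 *		rte_flow_destroy(port_id, f, &err);
 */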

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_validate_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_validate_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}

/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
			&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();
	return 0;
}
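
/*
 * The callbacks above are exposed through the generic rte_flow driver
 * interface. A minimal sketch of the ops table wiring (assumed here; the
 * exact registration lives with the rest of the driver):
 */
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};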