/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* Advance to the next non-VOID item/action of a flow rule. */
#define NEXT_ITEM_OF_PATTERN(item, pattern, index)\
	do {						\
		item = pattern + index;			\
		while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {\
			index++;			\
			item = pattern + index;		\
		}					\
	} while (0)

#define NEXT_ITEM_OF_ACTION(act, actions, index)\
	do {						\
		act = actions + index;			\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {\
			index++;			\
			act = actions + index;		\
		}					\
	} while (0)
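
/*
 * Usage sketch for the helpers above (illustrative only, not driver
 * code): callers keep a local cursor and bump it themselves between
 * invocations, e.g.
 *
 *	uint32_t index = 0;
 *	const struct rte_flow_item *item;
 *
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *	(item is now the first non-VOID entry)
 *	index++;
 *	NEXT_ITEM_OF_PATTERN(item, pattern, index);
 *	(item is now the following non-VOID entry)
 */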

/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item can be MAC or IPv4 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
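
/*
 * Illustrative input for the parser above (a sketch, not driver code):
 * following the doc comment, this would be accepted as an n-tuple rule
 * steering UDP 192.168.1.20:80 -> 192.167.3.50:80 to queue 3. All
 * matched values are big endian, per the note at the top of this file.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *
 * (Masks: 0xFFFFFFFF for the addresses, 0xFF for the protocol, 0xFFFF
 * for the ports; all other mask fields zero, item->last NULL.)
 */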

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* Parse pattern */
	index = 0;

	/* The first non-void item should be MAC. */
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	index++;
	item = pattern + index;
	while (item->type == RTE_FLOW_ITEM_TYPE_VOID) {
		index++;
		item = pattern + index;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	act = actions + index;
	while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {
		index++;
		act = actions + index;
	}
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* parse pattern */
	index = 0;

	/* The first not void item should be e-tag. */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}

/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;
	uint32_t index;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* parse action */
	index = 0;

	/* check if the first not void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means we don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_TCPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		/* check if the next not void item is RAW or END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_UDPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		/* check if the next not void item is RAW or END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_SCTPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}

		/* check if the next not void item is RAW or END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
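
	/*
	 * Worked example (values from the doc comment above): offset 12
	 * with pattern[0] = 0x86, pattern[1] = 0xDD matches the two
	 * bytes at the EtherType position of an untagged frame, i.e.
	 * 0x86DD on the wire. flex_bytes then becomes
	 * (0xDD << 8) | 0x86 = 0xDD86, the wire bytes packed low byte
	 * first.
	 */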

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
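
/*
 * A complete rule the parser above accepts, in testpmd-style syntax
 * (illustrative; see the doc comment above for the exact spec/mask
 * requirements):
 *
 *	flow create 0 ingress pattern eth / ipv4 src is 192.168.1.20
 *		dst is 192.167.3.50 / udp src is 80 dst is 80 / end
 *		actions queue index 3 / mark id 1 / end
 */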

#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t index, j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/* parse pattern */
	index = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN.
	 */
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the VxLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));
		rule->mask.tunnel_id_mask <<= 8;
		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
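
	/*
	 * Note on the VxLAN copy above: the three VNI bytes are written
	 * past the first byte of the 32-bit big-endian value, so after
	 * rte_be_to_cpu_32() formatted.tni_vni holds the VNI as a plain
	 * number, e.g. vni = {0x12, 0x34, 0x56} yields 0x00123456.
	 */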

	/* Get the NVGRE info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
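
		/*
		 * 0x3000 covers the GRE K (key present) and S (sequence
		 * present) bits, so the mask above requires both flags to
		 * be matched exactly; the spec check below then demands K
		 * set and S clear (0x2000), i.e. a keyed GRE header with
		 * the protocol fixed to NVGRE_PROTOCOL (0x6558,
		 * Transparent Ethernet Bridging).
		 */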
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI is a 24-bit field. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0,
				       sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
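
	/*
	 * Unlike the VxLAN path above, the NVGRE TNI bytes are copied
	 * into the low three bytes of tni_vni and then shifted left by
	 * 8 bits, which zeroes the low-order byte (the position of the
	 * 8-bit NVGRE flow ID) and leaves the 24-bit TNI in the upper
	 * bytes of the field.
	 */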

	/* Check if the next not void item is MAC. */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
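
	/*
	 * Each destination MAC byte is either fully significant (0xFF)
	 * or fully ignored (0x00), and every 0xFF byte sets one bit of
	 * mac_addr_byte_mask; e.g. a mask of ff:ff:ff:ff:ff:ff yields
	 * mac_addr_byte_mask == 0x3F.
	 */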

	/* When no vlan, considered as full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}

	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	index++;
	NEXT_ITEM_OF_PATTERN(item, pattern, index);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* Not supported last point for range. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is END. */
		index++;
		NEXT_ITEM_OF_PATTERN(item, pattern, index);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN TCI is 0, it means we don't care about the VLAN.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
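
/*
 * A hypothetical testpmd command this tunnel parser would accept,
 * assuming the port has been configured with a tunnel FDIR mode:
 *
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *     eth dst is 06:05:04:03:02:01 / vlan tci is 2 / end
 *     actions queue index 3 / end
 */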

static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);

step_next:
	/* The rule mode must match the configured FDIR mode. */
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	return ret;
}

void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr,
			     entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr,
			     entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr,
			     entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr,
			     entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr,
			     entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			     ixgbe_flow_mem_ptr,
			     entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
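
/*
 * Note: this only releases the driver's bookkeeping lists; the
 * hardware filters themselves are removed by the callers, e.g.
 * ixgbe_flow_flush() below clears the hardware first.
 */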

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
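/*
 * The parsers below are tried in order: ntuple, ethertype, SYN,
 * flow director, then L2 tunnel; the first parser that accepts the
 * rule determines the filter type used to program it.
 */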
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
			  ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
2566 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2567 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2568 actions, ðertype_filter, error);
2570 ret = ixgbe_add_del_ethertype_filter(dev,
2571 ðertype_filter, TRUE);
2573 ethertype_filter_ptr = rte_zmalloc(
2574 "ixgbe_ethertype_filter",
2575 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2576 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2578 sizeof(struct rte_eth_ethertype_filter));
2579 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2580 ethertype_filter_ptr, entries);
2581 flow->rule = ethertype_filter_ptr;
2582 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
			actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
			actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
				    fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			goto out;
		}

		goto out;
	}
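
	/*
	 * Example of the global-mask constraint above: once an FDIR rule
	 * with, say, a src IP mask of 255.255.255.0 exists, creating a
	 * second FDIR rule with a different mask (e.g. 255.255.0.0) fails
	 * until all FDIR rules have been destroyed.
	 */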

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
			actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		     ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		    const struct rte_flow_attr *attr,
		    const struct rte_flow_item pattern[],
		    const struct rte_flow_action actions[],
		    struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
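
/*
 * Sketch of how an application reaches this entry point through the
 * generic rte_flow API (port_id, attr, pattern and actions are assumed
 * to be set up by the caller):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */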

/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
				 &ntuple_filter_ptr->filter_info,
				 sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				     ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
				 &ethertype_filter_ptr->filter_info,
				 sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				     ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
				 &syn_filter_ptr->filter_info,
				 sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				     syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
				 &fdir_rule_ptr->filter_info,
				 sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				     fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
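			/*
			 * Dropping mask_added once the FDIR list is empty
			 * lets the next FDIR rule created install a fresh
			 * global input mask.
			 */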
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
				 sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				     l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				     ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		 struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
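
/*
 * Applications do not call these handlers directly: the generic
 * rte_flow API looks them up through the PMD's filter_ctrl hook
 * (RTE_ETH_FILTER_GENERIC) and dispatches rte_flow_validate(),
 * rte_flow_create(), rte_flow_destroy() and rte_flow_flush() to them.
 */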