4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the distribution.
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
60 #include <rte_malloc.h>
61 #include <rte_random.h>
63 #include <rte_hash_crc.h>
65 #include <rte_flow_driver.h>
67 #include "ixgbe_logs.h"
68 #include "base/ixgbe_api.h"
69 #include "base/ixgbe_vf.h"
70 #include "base/ixgbe_common.h"
71 #include "ixgbe_ethdev.h"
72 #include "ixgbe_bypass.h"
73 #include "ixgbe_rxtx.h"
74 #include "base/ixgbe_type.h"
75 #include "base/ixgbe_phy.h"
76 #include "rte_pmd_ixgbe.h"
79 #define IXGBE_MIN_N_TUPLE_PRIO 1
80 #define IXGBE_MAX_N_TUPLE_PRIO 7
81 #define IXGBE_MAX_FLX_SOURCE_OFF 62
84 * An endless loop cannot happen, based on the assumptions below:
85 * 1. there is at least one non-void item (END)
86 * 2. cur is before END.
89 const struct rte_flow_item *next_no_void_pattern(
90 const struct rte_flow_item pattern[],
91 const struct rte_flow_item *cur)
93 const struct rte_flow_item *next =
94 cur ? cur + 1 : &pattern[0];
96 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
103 const struct rte_flow_action *next_no_void_action(
104 const struct rte_flow_action actions[],
105 const struct rte_flow_action *cur)
107 const struct rte_flow_action *next =
108 cur ? cur + 1 : &actions[0];
110 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
117 * Please be aware of an assumption shared by all the parsers:
118 * rte_flow_item fields use big endian, while rte_flow_attr and
119 * rte_flow_action use CPU (host) order.
120 * Because the pattern is used to describe packets,
121 * the packet fields normally use network byte order.
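 *
 * For example (illustrative only), a TCP destination port of 80 must be
 * written into an item spec as rte_cpu_to_be_16(80), whereas
 * attr->priority is given as a plain CPU-order integer.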
125 * Parse the rule to see if it is an n-tuple rule,
126 * and extract the n-tuple filter info along the way.
128 * The first not void item can be ETH or IPV4.
129 * The second not void item must be IPV4 if the first one is ETH.
130 * The third not void item must be UDP or TCP.
131 * The next not void item must be END.
133 * The first not void action should be QUEUE.
134 * The next not void action should be END.
138 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
139 * dst_addr 192.167.3.50 0xFFFFFFFF
140 * next_proto_id 17 0xFF
141 * UDP/TCP/ src_port 80 0xFFFF
142 * SCTP dst_port 80 0xFFFF
144 * other members in mask and spec should be set to 0x00.
145 * item->last should be NULL.
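 *
 * A minimal application-side sketch of a rule this parser accepts
 * (illustrative only; the values are hypothetical and assume the generic
 * rte_flow item/action layout, with IPv4() being the address helper from
 * rte_ip.h):
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = UINT8_MAX,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = UINT16_MAX,
 *			.dst_port = UINT16_MAX,
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};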
148 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
149 const struct rte_flow_item pattern[],
150 const struct rte_flow_action actions[],
151 struct rte_eth_ntuple_filter *filter,
152 struct rte_flow_error *error)
154 const struct rte_flow_item *item;
155 const struct rte_flow_action *act;
156 const struct rte_flow_item_ipv4 *ipv4_spec;
157 const struct rte_flow_item_ipv4 *ipv4_mask;
158 const struct rte_flow_item_tcp *tcp_spec;
159 const struct rte_flow_item_tcp *tcp_mask;
160 const struct rte_flow_item_udp *udp_spec;
161 const struct rte_flow_item_udp *udp_mask;
162 const struct rte_flow_item_sctp *sctp_spec;
163 const struct rte_flow_item_sctp *sctp_mask;
166 rte_flow_error_set(error,
167 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
168 NULL, "NULL pattern.");
173 rte_flow_error_set(error, EINVAL,
174 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
175 NULL, "NULL action.");
179 rte_flow_error_set(error, EINVAL,
180 RTE_FLOW_ERROR_TYPE_ATTR,
181 NULL, "NULL attribute.");
185 /* the first not void item can be MAC or IPv4 */
186 item = next_no_void_pattern(pattern, NULL);
188 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
189 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
190 rte_flow_error_set(error, EINVAL,
191 RTE_FLOW_ERROR_TYPE_ITEM,
192 item, "Not supported by ntuple filter");
196 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
197 /*Not supported last point for range*/
199 rte_flow_error_set(error,
201 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
202 item, "Not supported last point for range");
206 /* if the first item is MAC, the content should be NULL */
207 if (item->spec || item->mask) {
208 rte_flow_error_set(error, EINVAL,
209 RTE_FLOW_ERROR_TYPE_ITEM,
210 item, "Not supported by ntuple filter");
213 /* check if the next not void item is IPv4 */
214 item = next_no_void_pattern(pattern, item);
215 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
216 rte_flow_error_set(error,
217 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
218 item, "Not supported by ntuple filter");
223 /* get the IPv4 info */
224 if (!item->spec || !item->mask) {
225 rte_flow_error_set(error, EINVAL,
226 RTE_FLOW_ERROR_TYPE_ITEM,
227 item, "Invalid ntuple mask");
230 /*Not supported last point for range*/
232 rte_flow_error_set(error, EINVAL,
233 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
234 item, "Not supported last point for range");
239 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
241 * Only support src & dst addresses, protocol,
242 * others should be masked.
244 if (ipv4_mask->hdr.version_ihl ||
245 ipv4_mask->hdr.type_of_service ||
246 ipv4_mask->hdr.total_length ||
247 ipv4_mask->hdr.packet_id ||
248 ipv4_mask->hdr.fragment_offset ||
249 ipv4_mask->hdr.time_to_live ||
250 ipv4_mask->hdr.hdr_checksum) {
251 rte_flow_error_set(error,
252 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
253 item, "Not supported by ntuple filter");
257 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
258 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
259 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
261 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
262 filter->dst_ip = ipv4_spec->hdr.dst_addr;
263 filter->src_ip = ipv4_spec->hdr.src_addr;
264 filter->proto = ipv4_spec->hdr.next_proto_id;
266 /* check if the next not void item is TCP or UDP */
267 item = next_no_void_pattern(pattern, item);
268 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
269 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
270 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
271 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
272 rte_flow_error_set(error, EINVAL,
273 RTE_FLOW_ERROR_TYPE_ITEM,
274 item, "Not supported by ntuple filter");
278 /* get the TCP/UDP info */
279 if (!item->spec || !item->mask) {
280 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
281 rte_flow_error_set(error, EINVAL,
282 RTE_FLOW_ERROR_TYPE_ITEM,
283 item, "Invalid ntuple mask");
287 /*Not supported last point for range*/
289 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
290 rte_flow_error_set(error, EINVAL,
291 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
292 item, "Not supported last point for range");
297 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
298 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
301 * Only support src & dst ports, tcp flags,
302 * others should be masked.
304 if (tcp_mask->hdr.sent_seq ||
305 tcp_mask->hdr.recv_ack ||
306 tcp_mask->hdr.data_off ||
307 tcp_mask->hdr.rx_win ||
308 tcp_mask->hdr.cksum ||
309 tcp_mask->hdr.tcp_urp) {
311 sizeof(struct rte_eth_ntuple_filter));
312 rte_flow_error_set(error, EINVAL,
313 RTE_FLOW_ERROR_TYPE_ITEM,
314 item, "Not supported by ntuple filter");
318 filter->dst_port_mask = tcp_mask->hdr.dst_port;
319 filter->src_port_mask = tcp_mask->hdr.src_port;
320 if (tcp_mask->hdr.tcp_flags == 0xFF) {
321 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
322 } else if (!tcp_mask->hdr.tcp_flags) {
323 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
325 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
326 rte_flow_error_set(error, EINVAL,
327 RTE_FLOW_ERROR_TYPE_ITEM,
328 item, "Not supported by ntuple filter");
332 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
333 filter->dst_port = tcp_spec->hdr.dst_port;
334 filter->src_port = tcp_spec->hdr.src_port;
335 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
336 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
337 udp_mask = (const struct rte_flow_item_udp *)item->mask;
340 * Only support src & dst ports,
341 * others should be masked.
343 if (udp_mask->hdr.dgram_len ||
344 udp_mask->hdr.dgram_cksum) {
346 sizeof(struct rte_eth_ntuple_filter));
347 rte_flow_error_set(error, EINVAL,
348 RTE_FLOW_ERROR_TYPE_ITEM,
349 item, "Not supported by ntuple filter");
353 filter->dst_port_mask = udp_mask->hdr.dst_port;
354 filter->src_port_mask = udp_mask->hdr.src_port;
356 udp_spec = (const struct rte_flow_item_udp *)item->spec;
357 filter->dst_port = udp_spec->hdr.dst_port;
358 filter->src_port = udp_spec->hdr.src_port;
360 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
363 * Only support src & dst ports,
364 * others should be masked.
366 if (sctp_mask->hdr.tag ||
367 sctp_mask->hdr.cksum) {
369 sizeof(struct rte_eth_ntuple_filter));
370 rte_flow_error_set(error, EINVAL,
371 RTE_FLOW_ERROR_TYPE_ITEM,
372 item, "Not supported by ntuple filter");
376 filter->dst_port_mask = sctp_mask->hdr.dst_port;
377 filter->src_port_mask = sctp_mask->hdr.src_port;
379 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
380 filter->dst_port = sctp_spec->hdr.dst_port;
381 filter->src_port = sctp_spec->hdr.src_port;
384 /* check if the next not void item is END */
385 item = next_no_void_pattern(pattern, item);
386 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
387 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
388 rte_flow_error_set(error, EINVAL,
389 RTE_FLOW_ERROR_TYPE_ITEM,
390 item, "Not supported by ntuple filter");
395 * n-tuple only supports forwarding,
396 * check if the first not void action is QUEUE.
398 act = next_no_void_action(actions, NULL);
399 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
400 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
401 rte_flow_error_set(error, EINVAL,
402 RTE_FLOW_ERROR_TYPE_ACTION,
403 item, "Not supported action.");
407 ((const struct rte_flow_action_queue *)act->conf)->index;
409 /* check if the next not void item is END */
410 act = next_no_void_action(actions, act);
411 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
412 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
413 rte_flow_error_set(error, EINVAL,
414 RTE_FLOW_ERROR_TYPE_ACTION,
415 act, "Not supported action.");
420 /* must be input direction */
421 if (!attr->ingress) {
422 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
423 rte_flow_error_set(error, EINVAL,
424 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
425 attr, "Only support ingress.");
431 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
432 rte_flow_error_set(error, EINVAL,
433 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
434 attr, "Not support egress.");
438 if (attr->priority > 0xFFFF) {
439 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
440 rte_flow_error_set(error, EINVAL,
441 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
442 attr, "Error priority.");
445 filter->priority = (uint16_t)attr->priority;
446 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
447 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
448 filter->priority = 1;
453 /* an ixgbe-specific function, because the filter flags are device specific */
455 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
456 const struct rte_flow_attr *attr,
457 const struct rte_flow_item pattern[],
458 const struct rte_flow_action actions[],
459 struct rte_eth_ntuple_filter *filter,
460 struct rte_flow_error *error)
463 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
465 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
467 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
472 /* Ixgbe doesn't support tcp flags. */
473 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
474 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
475 rte_flow_error_set(error, EINVAL,
476 RTE_FLOW_ERROR_TYPE_ITEM,
477 NULL, "Not supported by ntuple filter");
481 /* Ixgbe doesn't support many priorities. */
482 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
483 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
484 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
485 rte_flow_error_set(error, EINVAL,
486 RTE_FLOW_ERROR_TYPE_ITEM,
487 NULL, "Priority not supported by ntuple filter");
491 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
492 filter->priority > IXGBE_5TUPLE_MAX_PRI ||
493 filter->priority < IXGBE_5TUPLE_MIN_PRI)
496 /* fixed value for ixgbe */
497 filter->flags = RTE_5TUPLE_FLAGS;
502 * Parse the rule to see if it is an ethertype rule,
503 * and extract the ethertype filter info along the way.
505 * The first not void item must be ETH.
506 * The next not void item must be END.
508 * The first not void action should be QUEUE.
509 * The next not void action should be END.
512 * ETH type 0x0807 0xFFFF
514 * other members in mask and spec should be set to 0x00.
515 * item->last should be NULL.
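 *
 * A minimal sketch of an accepted ethertype rule (illustrative only,
 * hypothetical values):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};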
518 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
519 const struct rte_flow_item *pattern,
520 const struct rte_flow_action *actions,
521 struct rte_eth_ethertype_filter *filter,
522 struct rte_flow_error *error)
524 const struct rte_flow_item *item;
525 const struct rte_flow_action *act;
526 const struct rte_flow_item_eth *eth_spec;
527 const struct rte_flow_item_eth *eth_mask;
528 const struct rte_flow_action_queue *act_q;
531 rte_flow_error_set(error, EINVAL,
532 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
533 NULL, "NULL pattern.");
538 rte_flow_error_set(error, EINVAL,
539 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
540 NULL, "NULL action.");
545 rte_flow_error_set(error, EINVAL,
546 RTE_FLOW_ERROR_TYPE_ATTR,
547 NULL, "NULL attribute.");
551 item = next_no_void_pattern(pattern, NULL);
552 /* The first non-void item should be MAC. */
553 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
554 rte_flow_error_set(error, EINVAL,
555 RTE_FLOW_ERROR_TYPE_ITEM,
556 item, "Not supported by ethertype filter");
560 /*Not supported last point for range*/
562 rte_flow_error_set(error, EINVAL,
563 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
564 item, "Not supported last point for range");
568 /* Get the MAC info. */
569 if (!item->spec || !item->mask) {
570 rte_flow_error_set(error, EINVAL,
571 RTE_FLOW_ERROR_TYPE_ITEM,
572 item, "Not supported by ethertype filter");
576 eth_spec = (const struct rte_flow_item_eth *)item->spec;
577 eth_mask = (const struct rte_flow_item_eth *)item->mask;
579 /* Mask bits of source MAC address must be full of 0.
580 * Mask bits of destination MAC address must be full of 1 or 0.
583 if (!is_zero_ether_addr(ð_mask->src) ||
584 (!is_zero_ether_addr(ð_mask->dst) &&
585 !is_broadcast_ether_addr(ð_mask->dst))) {
586 rte_flow_error_set(error, EINVAL,
587 RTE_FLOW_ERROR_TYPE_ITEM,
588 item, "Invalid ether address mask");
592 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
593 rte_flow_error_set(error, EINVAL,
594 RTE_FLOW_ERROR_TYPE_ITEM,
595 item, "Invalid ethertype mask");
599 /* If mask bits of destination MAC address
600 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
602 if (is_broadcast_ether_addr(ð_mask->dst)) {
603 filter->mac_addr = eth_spec->dst;
604 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
606 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
608 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
610 /* Check if the next non-void item is END. */
611 item = next_no_void_pattern(pattern, item);
612 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
613 rte_flow_error_set(error, EINVAL,
614 RTE_FLOW_ERROR_TYPE_ITEM,
615 item, "Not supported by ethertype filter.");
621 act = next_no_void_action(actions, NULL);
622 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
623 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
624 rte_flow_error_set(error, EINVAL,
625 RTE_FLOW_ERROR_TYPE_ACTION,
626 act, "Not supported action.");
630 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
631 act_q = (const struct rte_flow_action_queue *)act->conf;
632 filter->queue = act_q->index;
634 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
637 /* Check if the next non-void item is END */
638 act = next_no_void_action(actions, act);
639 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
640 rte_flow_error_set(error, EINVAL,
641 RTE_FLOW_ERROR_TYPE_ACTION,
642 act, "Not supported action.");
647 /* Must be input direction */
648 if (!attr->ingress) {
649 rte_flow_error_set(error, EINVAL,
650 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
651 attr, "Only support ingress.");
657 rte_flow_error_set(error, EINVAL,
658 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
659 attr, "Not support egress.");
664 if (attr->priority) {
665 rte_flow_error_set(error, EINVAL,
666 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
667 attr, "Not support priority.");
673 rte_flow_error_set(error, EINVAL,
674 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
675 attr, "Not support group.");
683 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
684 const struct rte_flow_attr *attr,
685 const struct rte_flow_item pattern[],
686 const struct rte_flow_action actions[],
687 struct rte_eth_ethertype_filter *filter,
688 struct rte_flow_error *error)
691 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
693 MAC_TYPE_FILTER_SUP(hw->mac.type);
695 ret = cons_parse_ethertype_filter(attr, pattern,
696 actions, filter, error);
701 /* Ixgbe doesn't support MAC address. */
702 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
703 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
704 rte_flow_error_set(error, EINVAL,
705 RTE_FLOW_ERROR_TYPE_ITEM,
706 NULL, "Not supported by ethertype filter");
710 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
711 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
712 rte_flow_error_set(error, EINVAL,
713 RTE_FLOW_ERROR_TYPE_ITEM,
714 NULL, "queue index much too big");
718 if (filter->ether_type == ETHER_TYPE_IPv4 ||
719 filter->ether_type == ETHER_TYPE_IPv6) {
720 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
721 rte_flow_error_set(error, EINVAL,
722 RTE_FLOW_ERROR_TYPE_ITEM,
723 NULL, "IPv4/IPv6 not supported by ethertype filter");
727 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
728 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
729 rte_flow_error_set(error, EINVAL,
730 RTE_FLOW_ERROR_TYPE_ITEM,
731 NULL, "mac compare is unsupported");
735 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
736 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
737 rte_flow_error_set(error, EINVAL,
738 RTE_FLOW_ERROR_TYPE_ITEM,
739 NULL, "drop option is unsupported");
747 * Parse the rule to see if it is a TCP SYN rule,
748 * and extract the TCP SYN filter info along the way.
750 * The first not void item must be ETH.
751 * The second not void item must be IPV4 or IPV6.
752 * The third not void item must be TCP.
753 * The next not void item must be END.
755 * The first not void action should be QUEUE.
756 * The next not void action should be END.
760 * IPV4/IPV6 NULL NULL
761 * TCP tcp_flags 0x02 0xFF
763 * other members in mask and spec should be set to 0x00.
764 * item->last should be NULL.
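 *
 * A minimal sketch of an accepted TCP SYN pattern (illustrative only;
 * the actions would be a QUEUE/END list as in the n-tuple example above):
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};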
767 cons_parse_syn_filter(const struct rte_flow_attr *attr,
768 const struct rte_flow_item pattern[],
769 const struct rte_flow_action actions[],
770 struct rte_eth_syn_filter *filter,
771 struct rte_flow_error *error)
773 const struct rte_flow_item *item;
774 const struct rte_flow_action *act;
775 const struct rte_flow_item_tcp *tcp_spec;
776 const struct rte_flow_item_tcp *tcp_mask;
777 const struct rte_flow_action_queue *act_q;
780 rte_flow_error_set(error, EINVAL,
781 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
782 NULL, "NULL pattern.");
787 rte_flow_error_set(error, EINVAL,
788 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
789 NULL, "NULL action.");
794 rte_flow_error_set(error, EINVAL,
795 RTE_FLOW_ERROR_TYPE_ATTR,
796 NULL, "NULL attribute.");
801 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
802 item = next_no_void_pattern(pattern, NULL);
803 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
804 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
805 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
806 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
807 rte_flow_error_set(error, EINVAL,
808 RTE_FLOW_ERROR_TYPE_ITEM,
809 item, "Not supported by syn filter");
812 /*Not supported last point for range*/
814 rte_flow_error_set(error, EINVAL,
815 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
816 item, "Not supported last point for range");
821 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
822 /* if the item is MAC, the content should be NULL */
823 if (item->spec || item->mask) {
824 rte_flow_error_set(error, EINVAL,
825 RTE_FLOW_ERROR_TYPE_ITEM,
826 item, "Invalid SYN address mask");
830 /* check if the next not void item is IPv4 or IPv6 */
831 item = next_no_void_pattern(pattern, item);
832 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
833 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
834 rte_flow_error_set(error, EINVAL,
835 RTE_FLOW_ERROR_TYPE_ITEM,
836 item, "Not supported by syn filter");
842 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
843 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
844 /* if the item is IP, the content should be NULL */
845 if (item->spec || item->mask) {
846 rte_flow_error_set(error, EINVAL,
847 RTE_FLOW_ERROR_TYPE_ITEM,
848 item, "Invalid SYN mask");
852 /* check if the next not void item is TCP */
853 item = next_no_void_pattern(pattern, item);
854 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
855 rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ITEM,
857 item, "Not supported by syn filter");
862 /* Get the TCP info. Only support SYN. */
863 if (!item->spec || !item->mask) {
864 rte_flow_error_set(error, EINVAL,
865 RTE_FLOW_ERROR_TYPE_ITEM,
866 item, "Invalid SYN mask");
869 /*Not supported last point for range*/
871 rte_flow_error_set(error, EINVAL,
872 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
873 item, "Not supported last point for range");
877 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
878 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
879 if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
880 tcp_mask->hdr.src_port ||
881 tcp_mask->hdr.dst_port ||
882 tcp_mask->hdr.sent_seq ||
883 tcp_mask->hdr.recv_ack ||
884 tcp_mask->hdr.data_off ||
885 tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
886 tcp_mask->hdr.rx_win ||
887 tcp_mask->hdr.cksum ||
888 tcp_mask->hdr.tcp_urp) {
889 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
890 rte_flow_error_set(error, EINVAL,
891 RTE_FLOW_ERROR_TYPE_ITEM,
892 item, "Not supported by syn filter");
896 /* check if the next not void item is END */
897 item = next_no_void_pattern(pattern, item);
898 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
899 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
900 rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ITEM,
902 item, "Not supported by syn filter");
906 /* check if the first not void action is QUEUE. */
907 act = next_no_void_action(actions, NULL);
908 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
909 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
910 rte_flow_error_set(error, EINVAL,
911 RTE_FLOW_ERROR_TYPE_ACTION,
912 act, "Not supported action.");
916 act_q = (const struct rte_flow_action_queue *)act->conf;
917 filter->queue = act_q->index;
918 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
919 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
920 rte_flow_error_set(error, EINVAL,
921 RTE_FLOW_ERROR_TYPE_ACTION,
922 act, "Not supported action.");
926 /* check if the next not void item is END */
927 act = next_no_void_action(actions, act);
928 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
929 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
930 rte_flow_error_set(error, EINVAL,
931 RTE_FLOW_ERROR_TYPE_ACTION,
932 act, "Not supported action.");
937 /* must be input direction */
938 if (!attr->ingress) {
939 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
940 rte_flow_error_set(error, EINVAL,
941 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
942 attr, "Only support ingress.");
948 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
949 rte_flow_error_set(error, EINVAL,
950 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
951 attr, "Not support egress.");
955 /* Support 2 priorities, the lowest or highest. */
956 if (!attr->priority) {
958 } else if (attr->priority == (uint32_t)~0U) {
961 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
962 rte_flow_error_set(error, EINVAL,
963 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
964 attr, "Not support priority.");
972 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
973 const struct rte_flow_attr *attr,
974 const struct rte_flow_item pattern[],
975 const struct rte_flow_action actions[],
976 struct rte_eth_syn_filter *filter,
977 struct rte_flow_error *error)
980 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
982 MAC_TYPE_FILTER_SUP(hw->mac.type);
984 ret = cons_parse_syn_filter(attr, pattern,
985 actions, filter, error);
994 * Parse the rule to see if it is an L2 tunnel rule,
995 * and extract the L2 tunnel filter info along the way.
996 * Only E-tag is supported for now.
998 * The first not void item must be E_TAG.
999 * The next not void item must be END.
1001 * The first not void action should be QUEUE.
1002 * The next not void action should be END.
1006 * e_cid_base 0x309 0xFFF
1008 * other members in mask and spec should be set to 0x00.
1009 * item->last should be NULL.
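 *
 * A minimal sketch of an accepted E-tag item (illustrative only;
 * rsvd_grp_ecid_b is assumed to carry GRP and e_cid_base in its low
 * 14 bits, big endian on the wire):
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};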
1012 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1013 const struct rte_flow_item pattern[],
1014 const struct rte_flow_action actions[],
1015 struct rte_eth_l2_tunnel_conf *filter,
1016 struct rte_flow_error *error)
1018 const struct rte_flow_item *item;
1019 const struct rte_flow_item_e_tag *e_tag_spec;
1020 const struct rte_flow_item_e_tag *e_tag_mask;
1021 const struct rte_flow_action *act;
1022 const struct rte_flow_action_queue *act_q;
1025 rte_flow_error_set(error, EINVAL,
1026 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1027 NULL, "NULL pattern.");
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1034 NULL, "NULL action.");
1039 rte_flow_error_set(error, EINVAL,
1040 RTE_FLOW_ERROR_TYPE_ATTR,
1041 NULL, "NULL attribute.");
1045 /* The first not void item should be e-tag. */
1046 item = next_no_void_pattern(pattern, NULL);
1047 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1048 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1049 rte_flow_error_set(error, EINVAL,
1050 RTE_FLOW_ERROR_TYPE_ITEM,
1051 item, "Not supported by L2 tunnel filter");
1055 if (!item->spec || !item->mask) {
1056 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1057 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1058 item, "Not supported by L2 tunnel filter");
1062 /*Not supported last point for range*/
1064 rte_flow_error_set(error, EINVAL,
1065 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1066 item, "Not supported last point for range");
1070 e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1071 e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1073 /* Only care about GRP and E cid base. */
1074 if (e_tag_mask->epcp_edei_in_ecid_b ||
1075 e_tag_mask->in_ecid_e ||
1076 e_tag_mask->ecid_e ||
1077 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1078 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1079 rte_flow_error_set(error, EINVAL,
1080 RTE_FLOW_ERROR_TYPE_ITEM,
1081 item, "Not supported by L2 tunnel filter");
1085 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1087 * grp and e_cid_base are bit fields and only use 14 bits.
1088 * e-tag id is taken as little endian by HW.
1090 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1092 /* check if the next not void item is END */
1093 item = next_no_void_pattern(pattern, item);
1094 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1095 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1096 rte_flow_error_set(error, EINVAL,
1097 RTE_FLOW_ERROR_TYPE_ITEM,
1098 item, "Not supported by L2 tunnel filter");
1103 /* must be input direction */
1104 if (!attr->ingress) {
1105 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1106 rte_flow_error_set(error, EINVAL,
1107 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1108 attr, "Only support ingress.");
1114 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115 rte_flow_error_set(error, EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1117 attr, "Not support egress.");
1122 if (attr->priority) {
1123 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1124 rte_flow_error_set(error, EINVAL,
1125 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1126 attr, "Not support priority.");
1130 /* check if the first not void action is QUEUE. */
1131 act = next_no_void_action(actions, NULL);
1132 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1133 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1134 rte_flow_error_set(error, EINVAL,
1135 RTE_FLOW_ERROR_TYPE_ACTION,
1136 act, "Not supported action.");
1140 act_q = (const struct rte_flow_action_queue *)act->conf;
1141 filter->pool = act_q->index;
1143 /* check if the next not void item is END */
1144 act = next_no_void_action(actions, act);
1145 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1146 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147 rte_flow_error_set(error, EINVAL,
1148 RTE_FLOW_ERROR_TYPE_ACTION,
1149 act, "Not supported action.");
1157 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1158 const struct rte_flow_attr *attr,
1159 const struct rte_flow_item pattern[],
1160 const struct rte_flow_action actions[],
1161 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1162 struct rte_flow_error *error)
1165 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1167 ret = cons_parse_l2_tn_filter(attr, pattern,
1168 actions, l2_tn_filter, error);
1170 if (hw->mac.type != ixgbe_mac_X550 &&
1171 hw->mac.type != ixgbe_mac_X550EM_x &&
1172 hw->mac.type != ixgbe_mac_X550EM_a) {
1173 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174 rte_flow_error_set(error, EINVAL,
1175 RTE_FLOW_ERROR_TYPE_ITEM,
1176 NULL, "Not supported by L2 tunnel filter");
1183 /* Parse to get the attr and action info of a flow director rule. */
1185 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1186 const struct rte_flow_action actions[],
1187 struct ixgbe_fdir_rule *rule,
1188 struct rte_flow_error *error)
1190 const struct rte_flow_action *act;
1191 const struct rte_flow_action_queue *act_q;
1192 const struct rte_flow_action_mark *mark;
1195 /* must be input direction */
1196 if (!attr->ingress) {
1197 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1198 rte_flow_error_set(error, EINVAL,
1199 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1200 attr, "Only support ingress.");
1206 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1207 rte_flow_error_set(error, EINVAL,
1208 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1209 attr, "Not support egress.");
1214 if (attr->priority) {
1215 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1216 rte_flow_error_set(error, EINVAL,
1217 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1218 attr, "Not support priority.");
1222 /* check if the first not void action is QUEUE or DROP. */
1223 act = next_no_void_action(actions, NULL);
1224 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1225 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1226 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1227 rte_flow_error_set(error, EINVAL,
1228 RTE_FLOW_ERROR_TYPE_ACTION,
1229 act, "Not supported action.");
1233 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1234 act_q = (const struct rte_flow_action_queue *)act->conf;
1235 rule->queue = act_q->index;
1237 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1240 /* check if the next not void item is MARK */
1241 act = next_no_void_action(actions, act);
1242 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1243 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1244 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1245 rte_flow_error_set(error, EINVAL,
1246 RTE_FLOW_ERROR_TYPE_ACTION,
1247 act, "Not supported action.");
1253 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1254 mark = (const struct rte_flow_action_mark *)act->conf;
1255 rule->soft_id = mark->id;
1256 act = next_no_void_action(actions, act);
1259 /* check if the next not void item is END */
1260 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1261 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1262 rte_flow_error_set(error, EINVAL,
1263 RTE_FLOW_ERROR_TYPE_ACTION,
1264 act, "Not supported action.");
1271 /* Search the next non-void pattern item, skipping any FUZZY item. */
1273 const struct rte_flow_item *next_no_fuzzy_pattern(
1274 const struct rte_flow_item pattern[],
1275 const struct rte_flow_item *cur)
1277 const struct rte_flow_item *next =
1278 next_no_void_pattern(pattern, cur);
1280 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1282 next = next_no_void_pattern(pattern, next);
1286 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1288 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1289 const struct rte_flow_item *item;
1290 uint32_t sh, lh, mh;
1295 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1298 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1300 (const struct rte_flow_item_fuzzy *)item->spec;
1302 (const struct rte_flow_item_fuzzy *)item->last;
1304 (const struct rte_flow_item_fuzzy *)item->mask;
1333 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1334 * and extract the flow director filter info along the way.
1335 * UDP/TCP/SCTP PATTERN:
1336 * The first not void item can be ETH or IPV4 or IPV6
1337 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1338 * The next not void item could be UDP or TCP or SCTP (optional)
1339 * The next not void item could be RAW (for flexbyte, optional)
1340 * The next not void item must be END.
1341 * A Fuzzy Match pattern can appear at any place before END.
1342 * Fuzzy Match is optional for IPV4 but is required for IPV6
1344 * The first not void item must be ETH.
1345 * The second not void item must be MAC VLAN.
1346 * The next not void item must be END.
1348 * The first not void action should be QUEUE or DROP.
1349 * The second not void optional action should be MARK,
1350 * mark_id is a uint32_t number.
1351 * The next not void action should be END.
1352 * UDP/TCP/SCTP pattern example:
1355 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1356 * dst_addr 192.167.3.50 0xFFFFFFFF
1357 * UDP/TCP/SCTP src_port 80 0xFFFF
1358 * dst_port 80 0xFFFF
1359 * FLEX relative 0 0x1
1362 * offset 12 0xFFFFFFFF
1365 * pattern[0] 0x86 0xFF
1366 * pattern[1] 0xDD 0xFF
1368 * MAC VLAN pattern example:
1371 * {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1372 * 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1373 * MAC VLAN tci 0x2016 0xEFFF
1375 * Other members in mask and spec should be set to 0x00.
1376 * Item->last should be NULL.
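 *
 * A minimal sketch of the MAC VLAN pattern above (illustrative only,
 * hypothetical values):
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};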
1379 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1380 const struct rte_flow_item pattern[],
1381 const struct rte_flow_action actions[],
1382 struct ixgbe_fdir_rule *rule,
1383 struct rte_flow_error *error)
1385 const struct rte_flow_item *item;
1386 const struct rte_flow_item_eth *eth_spec;
1387 const struct rte_flow_item_eth *eth_mask;
1388 const struct rte_flow_item_ipv4 *ipv4_spec;
1389 const struct rte_flow_item_ipv4 *ipv4_mask;
1390 const struct rte_flow_item_ipv6 *ipv6_spec;
1391 const struct rte_flow_item_ipv6 *ipv6_mask;
1392 const struct rte_flow_item_tcp *tcp_spec;
1393 const struct rte_flow_item_tcp *tcp_mask;
1394 const struct rte_flow_item_udp *udp_spec;
1395 const struct rte_flow_item_udp *udp_mask;
1396 const struct rte_flow_item_sctp *sctp_spec;
1397 const struct rte_flow_item_sctp *sctp_mask;
1398 const struct rte_flow_item_vlan *vlan_spec;
1399 const struct rte_flow_item_vlan *vlan_mask;
1400 const struct rte_flow_item_raw *raw_mask;
1401 const struct rte_flow_item_raw *raw_spec;
1406 rte_flow_error_set(error, EINVAL,
1407 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1408 NULL, "NULL pattern.");
1413 rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1415 NULL, "NULL action.");
1420 rte_flow_error_set(error, EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ATTR,
1422 NULL, "NULL attribute.");
1427 * Some fields may not be provided. Set spec to 0 and mask to the default
1428 * value, so we need not do anything for the unprovided fields later.
1430 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1431 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1432 rule->mask.vlan_tci_mask = 0;
1433 rule->mask.flex_bytes_mask = 0;
1436 * The first not void item should be
1437 * MAC or IPv4 or TCP or UDP or SCTP.
1439 item = next_no_fuzzy_pattern(pattern, NULL);
1440 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1441 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1442 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1443 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1444 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1445 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1446 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1447 rte_flow_error_set(error, EINVAL,
1448 RTE_FLOW_ERROR_TYPE_ITEM,
1449 item, "Not supported by fdir filter");
1453 if (signature_match(pattern))
1454 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1456 rule->mode = RTE_FDIR_MODE_PERFECT;
1458 /*Not supported last point for range*/
1460 rte_flow_error_set(error, EINVAL,
1461 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1462 item, "Not supported last point for range");
1466 /* Get the MAC info. */
1467 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1469 * Only support vlan and dst MAC address,
1470 * others should be masked.
1472 if (item->spec && !item->mask) {
1473 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1474 rte_flow_error_set(error, EINVAL,
1475 RTE_FLOW_ERROR_TYPE_ITEM,
1476 item, "Not supported by fdir filter");
1481 rule->b_spec = TRUE;
1482 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1484 /* Get the dst MAC. */
1485 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1486 rule->ixgbe_fdir.formatted.inner_mac[j] =
1487 eth_spec->dst.addr_bytes[j];
1494 rule->b_mask = TRUE;
1495 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1497 /* The Ether type must not be matched on; its mask must be zero. */
1498 if (eth_mask->type ||
1499 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1500 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1501 rte_flow_error_set(error, EINVAL,
1502 RTE_FLOW_ERROR_TYPE_ITEM,
1503 item, "Not supported by fdir filter");
1507 /* If the Ethernet item carries content, it means MAC VLAN mode. */
1508 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1511 * The src MAC address mask must be all zeroes, and only an
1512 * all-ones dst MAC address mask is supported.
1514 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1515 if (eth_mask->src.addr_bytes[j] ||
1516 eth_mask->dst.addr_bytes[j] != 0xFF) {
1518 sizeof(struct ixgbe_fdir_rule));
1519 rte_flow_error_set(error, EINVAL,
1520 RTE_FLOW_ERROR_TYPE_ITEM,
1521 item, "Not supported by fdir filter");
1526 /* When there is no VLAN item, treat it as a full VLAN TCI mask. */
1527 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1529 /*** If neither spec nor mask is provided,
1530 * it means we don't care about the ETH content.
1535 * Check if the next not void item is vlan or ipv4.
1536 * IPv6 is not supported.
1538 item = next_no_fuzzy_pattern(pattern, item);
1539 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1540 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1541 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1542 rte_flow_error_set(error, EINVAL,
1543 RTE_FLOW_ERROR_TYPE_ITEM,
1544 item, "Not supported by fdir filter");
1548 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1549 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1550 rte_flow_error_set(error, EINVAL,
1551 RTE_FLOW_ERROR_TYPE_ITEM,
1552 item, "Not supported by fdir filter");
1558 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1559 if (!(item->spec && item->mask)) {
1560 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1561 rte_flow_error_set(error, EINVAL,
1562 RTE_FLOW_ERROR_TYPE_ITEM,
1563 item, "Not supported by fdir filter");
1567 /*Not supported last point for range*/
1569 rte_flow_error_set(error, EINVAL,
1570 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1571 item, "Not supported last point for range");
1575 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1576 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1578 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1580 rule->mask.vlan_tci_mask = vlan_mask->tci;
1581 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1582 /* More than one VLAN tag is not supported. */
1584 /* Next not void item must be END */
1585 item = next_no_fuzzy_pattern(pattern, item);
1586 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1587 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1588 rte_flow_error_set(error, EINVAL,
1589 RTE_FLOW_ERROR_TYPE_ITEM,
1590 item, "Not supported by fdir filter");
1595 /* Get the IPV4 info. */
1596 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1598 * Set the flow type even if there's no content
1599 * as we must have a flow type.
1601 rule->ixgbe_fdir.formatted.flow_type =
1602 IXGBE_ATR_FLOW_TYPE_IPV4;
1603 /*Not supported last point for range*/
1605 rte_flow_error_set(error, EINVAL,
1606 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1607 item, "Not supported last point for range");
1611 * Only care about src & dst addresses,
1612 * others should be masked.
1615 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1616 rte_flow_error_set(error, EINVAL,
1617 RTE_FLOW_ERROR_TYPE_ITEM,
1618 item, "Not supported by fdir filter");
1621 rule->b_mask = TRUE;
1623 (const struct rte_flow_item_ipv4 *)item->mask;
1624 if (ipv4_mask->hdr.version_ihl ||
1625 ipv4_mask->hdr.type_of_service ||
1626 ipv4_mask->hdr.total_length ||
1627 ipv4_mask->hdr.packet_id ||
1628 ipv4_mask->hdr.fragment_offset ||
1629 ipv4_mask->hdr.time_to_live ||
1630 ipv4_mask->hdr.next_proto_id ||
1631 ipv4_mask->hdr.hdr_checksum) {
1632 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1633 rte_flow_error_set(error, EINVAL,
1634 RTE_FLOW_ERROR_TYPE_ITEM,
1635 item, "Not supported by fdir filter");
1638 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1639 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1642 rule->b_spec = TRUE;
1644 (const struct rte_flow_item_ipv4 *)item->spec;
1645 rule->ixgbe_fdir.formatted.dst_ip[0] =
1646 ipv4_spec->hdr.dst_addr;
1647 rule->ixgbe_fdir.formatted.src_ip[0] =
1648 ipv4_spec->hdr.src_addr;
1652 * Check if the next not void item is
1653 * TCP or UDP or SCTP or END.
1655 item = next_no_fuzzy_pattern(pattern, item);
1656 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1657 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1658 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1659 item->type != RTE_FLOW_ITEM_TYPE_END &&
1660 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1661 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1662 rte_flow_error_set(error, EINVAL,
1663 RTE_FLOW_ERROR_TYPE_ITEM,
1664 item, "Not supported by fdir filter");
1669 /* Get the IPV6 info. */
1670 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1672 * Set the flow type even if there's no content
1673 * as we must have a flow type.
1675 rule->ixgbe_fdir.formatted.flow_type =
1676 IXGBE_ATR_FLOW_TYPE_IPV6;
1679 * 1. it must be a signature match rule
1680 * 2. 'last' is not supported
1681 * 3. the mask must not be NULL
1683 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1686 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1687 rte_flow_error_set(error, EINVAL,
1688 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1689 item, "Not supported last point for range");
1693 rule->b_mask = TRUE;
1695 (const struct rte_flow_item_ipv6 *)item->mask;
1696 if (ipv6_mask->hdr.vtc_flow ||
1697 ipv6_mask->hdr.payload_len ||
1698 ipv6_mask->hdr.proto ||
1699 ipv6_mask->hdr.hop_limits) {
1700 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1701 rte_flow_error_set(error, EINVAL,
1702 RTE_FLOW_ERROR_TYPE_ITEM,
1703 item, "Not supported by fdir filter");
1707 /* check src addr mask */
1708 for (j = 0; j < 16; j++) {
1709 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1710 rule->mask.src_ipv6_mask |= 1 << j;
1711 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1712 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1713 rte_flow_error_set(error, EINVAL,
1714 RTE_FLOW_ERROR_TYPE_ITEM,
1715 item, "Not supported by fdir filter");
1720 /* check dst addr mask */
1721 for (j = 0; j < 16; j++) {
1722 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1723 rule->mask.dst_ipv6_mask |= 1 << j;
1724 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1725 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726 rte_flow_error_set(error, EINVAL,
1727 RTE_FLOW_ERROR_TYPE_ITEM,
1728 item, "Not supported by fdir filter");
1734 rule->b_spec = TRUE;
1736 (const struct rte_flow_item_ipv6 *)item->spec;
1737 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1738 ipv6_spec->hdr.src_addr, 16);
1739 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1740 ipv6_spec->hdr.dst_addr, 16);
1744 * Check if the next not void item is
1745 * TCP or UDP or SCTP or END.
1747 item = next_no_fuzzy_pattern(pattern, item);
1748 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1749 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1750 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1751 item->type != RTE_FLOW_ITEM_TYPE_END &&
1752 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1753 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1754 rte_flow_error_set(error, EINVAL,
1755 RTE_FLOW_ERROR_TYPE_ITEM,
1756 item, "Not supported by fdir filter");
1761 /* Get the TCP info. */
1762 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1764 * Set the flow type even if there's no content
1765 * as we must have a flow type.
1767 rule->ixgbe_fdir.formatted.flow_type |=
1768 IXGBE_ATR_L4TYPE_TCP;
1769 /*Not supported last point for range*/
1771 rte_flow_error_set(error, EINVAL,
1772 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1773 item, "Not supported last point for range");
1777 * Only care about src & dst ports,
1778 * others should be masked.
1781 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1782 rte_flow_error_set(error, EINVAL,
1783 RTE_FLOW_ERROR_TYPE_ITEM,
1784 item, "Not supported by fdir filter");
1787 rule->b_mask = TRUE;
1788 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1789 if (tcp_mask->hdr.sent_seq ||
1790 tcp_mask->hdr.recv_ack ||
1791 tcp_mask->hdr.data_off ||
1792 tcp_mask->hdr.tcp_flags ||
1793 tcp_mask->hdr.rx_win ||
1794 tcp_mask->hdr.cksum ||
1795 tcp_mask->hdr.tcp_urp) {
1796 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1797 rte_flow_error_set(error, EINVAL,
1798 RTE_FLOW_ERROR_TYPE_ITEM,
1799 item, "Not supported by fdir filter");
1802 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1803 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1806 rule->b_spec = TRUE;
1807 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1808 rule->ixgbe_fdir.formatted.src_port =
1809 tcp_spec->hdr.src_port;
1810 rule->ixgbe_fdir.formatted.dst_port =
1811 tcp_spec->hdr.dst_port;
1814 item = next_no_fuzzy_pattern(pattern, item);
1815 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1816 item->type != RTE_FLOW_ITEM_TYPE_END) {
1817 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1818 rte_flow_error_set(error, EINVAL,
1819 RTE_FLOW_ERROR_TYPE_ITEM,
1820 item, "Not supported by fdir filter");
1826 /* Get the UDP info */
1827 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1829 * Set the flow type even if there's no content
1830 * as we must have a flow type.
1832 rule->ixgbe_fdir.formatted.flow_type |=
1833 IXGBE_ATR_L4TYPE_UDP;
1834 /*Not supported last point for range*/
1836 rte_flow_error_set(error, EINVAL,
1837 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1838 item, "Not supported last point for range");
1842 * Only care about src & dst ports,
1843 * others should be masked.
1846 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847 rte_flow_error_set(error, EINVAL,
1848 RTE_FLOW_ERROR_TYPE_ITEM,
1849 item, "Not supported by fdir filter");
1852 rule->b_mask = TRUE;
1853 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1854 if (udp_mask->hdr.dgram_len ||
1855 udp_mask->hdr.dgram_cksum) {
1856 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1857 rte_flow_error_set(error, EINVAL,
1858 RTE_FLOW_ERROR_TYPE_ITEM,
1859 item, "Not supported by fdir filter");
1862 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1863 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1866 rule->b_spec = TRUE;
1867 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1868 rule->ixgbe_fdir.formatted.src_port =
1869 udp_spec->hdr.src_port;
1870 rule->ixgbe_fdir.formatted.dst_port =
1871 udp_spec->hdr.dst_port;
1874 item = next_no_fuzzy_pattern(pattern, item);
1875 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1876 item->type != RTE_FLOW_ITEM_TYPE_END) {
1877 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1878 rte_flow_error_set(error, EINVAL,
1879 RTE_FLOW_ERROR_TYPE_ITEM,
1880 item, "Not supported by fdir filter");
1886 /* Get the SCTP info */
1887 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1889 * Set the flow type even if there's no content
1890 * as we must have a flow type.
1892 rule->ixgbe_fdir.formatted.flow_type |=
1893 IXGBE_ATR_L4TYPE_SCTP;
1894 /*Not supported last point for range*/
1896 rte_flow_error_set(error, EINVAL,
1897 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1898 item, "Not supported last point for range");
1902 * Only care about src & dst ports,
1903 * others should be masked.
1906 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1907 rte_flow_error_set(error, EINVAL,
1908 RTE_FLOW_ERROR_TYPE_ITEM,
1909 item, "Not supported by fdir filter");
1912 rule->b_mask = TRUE;
1914 (const struct rte_flow_item_sctp *)item->mask;
1915 if (sctp_mask->hdr.tag ||
1916 sctp_mask->hdr.cksum) {
1917 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1918 rte_flow_error_set(error, EINVAL,
1919 RTE_FLOW_ERROR_TYPE_ITEM,
1920 item, "Not supported by fdir filter");
1923 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1924 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1927 rule->b_spec = TRUE;
1929 (const struct rte_flow_item_sctp *)item->spec;
1930 rule->ixgbe_fdir.formatted.src_port =
1931 sctp_spec->hdr.src_port;
1932 rule->ixgbe_fdir.formatted.dst_port =
1933 sctp_spec->hdr.dst_port;
1936 item = next_no_fuzzy_pattern(pattern, item);
1937 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1938 item->type != RTE_FLOW_ITEM_TYPE_END) {
1939 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1940 rte_flow_error_set(error, EINVAL,
1941 RTE_FLOW_ERROR_TYPE_ITEM,
1942 item, "Not supported by fdir filter");
1947 /* Get the flex byte info */
1948 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1949 /* Not supported last point for range*/
1951 rte_flow_error_set(error, EINVAL,
1952 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1953 item, "Not supported last point for range");
1956 /* mask should not be null */
1957 if (!item->mask || !item->spec) {
1958 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1959 rte_flow_error_set(error, EINVAL,
1960 RTE_FLOW_ERROR_TYPE_ITEM,
1961 item, "Not supported by fdir filter");
1965 raw_mask = (const struct rte_flow_item_raw *)item->mask;
1968 if (raw_mask->relative != 0x1 ||
1969 raw_mask->search != 0x1 ||
1970 raw_mask->reserved != 0x0 ||
1971 (uint32_t)raw_mask->offset != 0xffffffff ||
1972 raw_mask->limit != 0xffff ||
1973 raw_mask->length != 0xffff) {
1974 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1975 rte_flow_error_set(error, EINVAL,
1976 RTE_FLOW_ERROR_TYPE_ITEM,
1977 item, "Not supported by fdir filter");
1981 raw_spec = (const struct rte_flow_item_raw *)item->spec;
1984 if (raw_spec->relative != 0 ||
1985 raw_spec->search != 0 ||
1986 raw_spec->reserved != 0 ||
1987 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1988 raw_spec->offset % 2 ||
1989 raw_spec->limit != 0 ||
1990 raw_spec->length != 2 ||
1991 /* pattern can't be 0xffff */
1992 (raw_spec->pattern[0] == 0xff &&
1993 raw_spec->pattern[1] == 0xff)) {
1994 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1995 rte_flow_error_set(error, EINVAL,
1996 RTE_FLOW_ERROR_TYPE_ITEM,
1997 item, "Not supported by fdir filter");
2001 /* check pattern mask */
2002 if (raw_mask->pattern[0] != 0xff ||
2003 raw_mask->pattern[1] != 0xff) {
2004 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2005 rte_flow_error_set(error, EINVAL,
2006 RTE_FLOW_ERROR_TYPE_ITEM,
2007 item, "Not supported by fdir filter");
2011 rule->mask.flex_bytes_mask = 0xffff;
2012 rule->ixgbe_fdir.formatted.flex_bytes =
2013 (((uint16_t)raw_spec->pattern[1]) << 8) |
2014 raw_spec->pattern[0];
2015 rule->flex_bytes_offset = raw_spec->offset;
2018 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2019 /* check if the next not void item is END */
2020 item = next_no_fuzzy_pattern(pattern, item);
2021 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2022 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2023 rte_flow_error_set(error, EINVAL,
2024 RTE_FLOW_ERROR_TYPE_ITEM,
2025 item, "Not supported by fdir filter");
2030 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2033 #define NVGRE_PROTOCOL 0x6558
2036 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2037 * and extract the flow director filter info along the way.
2038 * VxLAN PATTERN:
2039 * The first not void item must be ETH.
2040 * The second not void item must be IPV4/IPV6.
2041 * The third not void item must be UDP, followed by VXLAN.
2042 * The next not void item must be END.
2043 * NVGRE PATTERN:
2044 * The first not void item must be ETH.
2045 * The second not void item must be IPV4/IPV6.
2046 * The third not void item must be NVGRE.
2047 * The next not void item must be END.
2049 * The first not void action should be QUEUE or DROP.
2050 * The second not void optional action should be MARK,
2051 * mark_id is a uint32_t number.
2052 * The next not void action should be END.
2053 * VxLAN pattern example:
2056 * IPV4/IPV6 NULL NULL
2058 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2059 * MAC VLAN tci 0x2016 0xEFFF
2061 * NVGRE pattern example:
2064 * IPV4/IPV6 NULL NULL
2065 * NVGRE protocol 0x6558 0xFFFF
2066 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2067 * MAC VLAN tci 0x2016 0xEFFF
2069 * other members in mask and spec should set to 0x00.
2070 * item->last should be NULL.
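 *
 * A minimal sketch of the VxLAN item accepted below (illustrative only;
 * the outer ETH/IP/UDP items carry no spec/mask):
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};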
2073 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2074 const struct rte_flow_item pattern[],
2075 const struct rte_flow_action actions[],
2076 struct ixgbe_fdir_rule *rule,
2077 struct rte_flow_error *error)
2079 const struct rte_flow_item *item;
2080 const struct rte_flow_item_vxlan *vxlan_spec;
2081 const struct rte_flow_item_vxlan *vxlan_mask;
2082 const struct rte_flow_item_nvgre *nvgre_spec;
2083 const struct rte_flow_item_nvgre *nvgre_mask;
2084 const struct rte_flow_item_eth *eth_spec;
2085 const struct rte_flow_item_eth *eth_mask;
2086 const struct rte_flow_item_vlan *vlan_spec;
2087 const struct rte_flow_item_vlan *vlan_mask;
2091 rte_flow_error_set(error, EINVAL,
2092 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2093 NULL, "NULL pattern.");
2098 rte_flow_error_set(error, EINVAL,
2099 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2100 NULL, "NULL action.");
2105 rte_flow_error_set(error, EINVAL,
2106 RTE_FLOW_ERROR_TYPE_ATTR,
2107 NULL, "NULL attribute.");
2112 * Some fields may not be provided. Set spec to 0 and mask to the default
2113 * value, so we need not do anything for the unprovided fields later.
2115 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2116 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2117 rule->mask.vlan_tci_mask = 0;
2120 * The first not void item should be
2121 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2123 item = next_no_void_pattern(pattern, NULL);
2124 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2125 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2126 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2127 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2128 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2129 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2130 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2131 rte_flow_error_set(error, EINVAL,
2132 RTE_FLOW_ERROR_TYPE_ITEM,
2133 item, "Not supported by fdir filter");
2137 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2140 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2141 /* Only used to describe the protocol stack. */
2142 if (item->spec || item->mask) {
2143 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2144 rte_flow_error_set(error, EINVAL,
2145 RTE_FLOW_ERROR_TYPE_ITEM,
2146 item, "Not supported by fdir filter");
2149 /* Not supported last point for range*/
2151 rte_flow_error_set(error, EINVAL,
2152 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2153 item, "Not supported last point for range");
2157 /* Check if the next not void item is IPv4 or IPv6. */
2158 item = next_no_void_pattern(pattern, item);
2159 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2160 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2161 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2162 rte_flow_error_set(error, EINVAL,
2163 RTE_FLOW_ERROR_TYPE_ITEM,
2164 item, "Not supported by fdir filter");
2170 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2171 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2172 /* Only used to describe the protocol stack. */
2173 if (item->spec || item->mask) {
2174 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2175 rte_flow_error_set(error, EINVAL,
2176 RTE_FLOW_ERROR_TYPE_ITEM,
2177 item, "Not supported by fdir filter");
2180 /*Not supported last point for range*/
2182 rte_flow_error_set(error, EINVAL,
2183 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2184 item, "Not supported last point for range");
2188 /* Check if the next not void item is UDP or NVGRE. */
2189 item = next_no_void_pattern(pattern, item);
2190 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2191 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2192 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2193 rte_flow_error_set(error, EINVAL,
2194 RTE_FLOW_ERROR_TYPE_ITEM,
2195 item, "Not supported by fdir filter");
2201 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2202 /* Only used to describe the protocol stack. */
2203 if (item->spec || item->mask) {
2204 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2205 rte_flow_error_set(error, EINVAL,
2206 RTE_FLOW_ERROR_TYPE_ITEM,
2207 item, "Not supported by fdir filter");
2210 /*Not supported last point for range*/
2212 rte_flow_error_set(error, EINVAL,
2213 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2214 item, "Not supported last point for range");
2218 /* Check if the next not void item is VxLAN. */
2219 item = next_no_void_pattern(pattern, item);
2220 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2221 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2222 rte_flow_error_set(error, EINVAL,
2223 RTE_FLOW_ERROR_TYPE_ITEM,
2224 item, "Not supported by fdir filter");
2229 /* Get the VxLAN info */
2230 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2231 rule->ixgbe_fdir.formatted.tunnel_type =
2232 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2234 /* Only the VNI is used; other fields should be masked out. */
2236 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2237 rte_flow_error_set(error, EINVAL,
2238 RTE_FLOW_ERROR_TYPE_ITEM,
2239 item, "Not supported by fdir filter");
2242 /*Not supported last point for range*/
2244 rte_flow_error_set(error, EINVAL,
2245 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2246 item, "Not supported last point for range");
2249 rule->b_mask = TRUE;
2251 /* Tunnel type is always meaningful. */
2252 rule->mask.tunnel_type_mask = 1;
2255 (const struct rte_flow_item_vxlan *)item->mask;
2256 if (vxlan_mask->flags) {
2257 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2258 rte_flow_error_set(error, EINVAL,
2259 RTE_FLOW_ERROR_TYPE_ITEM,
2260 item, "Not supported by fdir filter");
2263 /* The VNI must be either fully masked or not masked at all. */
2264 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2265 vxlan_mask->vni[2]) &&
2266 ((vxlan_mask->vni[0] != 0xFF) ||
2267 (vxlan_mask->vni[1] != 0xFF) ||
2268 (vxlan_mask->vni[2] != 0xFF))) {
2269 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2270 rte_flow_error_set(error, EINVAL,
2271 RTE_FLOW_ERROR_TYPE_ITEM,
2272 item, "Not supported by fdir filter");
2276 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2277 RTE_DIM(vxlan_mask->vni));
2280 rule->b_spec = TRUE;
2281 vxlan_spec = (const struct rte_flow_item_vxlan *)
2283 rte_memcpy(((uint8_t *)
2284 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2285 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2286 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2287 rule->ixgbe_fdir.formatted.tni_vni);
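/*
 * The three VNI bytes are copied behind a leading zero byte so that the
 * 32-bit tni_vni field holds the VNI in big-endian form;
 * rte_be_to_cpu_32() then converts it to CPU order.
 */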
2291 /* Get the NVGRE info */
2292 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2293 rule->ixgbe_fdir.formatted.tunnel_type =
2294 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2297 * Only the GRE flags (c_k_s_rsvd0_ver), protocol and TNI are used;
2298 * other fields should be masked out.
2301 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2302 rte_flow_error_set(error, EINVAL,
2303 RTE_FLOW_ERROR_TYPE_ITEM,
2304 item, "Not supported by fdir filter");
2307 /*Not supported last point for range*/
2309 rte_flow_error_set(error, EINVAL,
2310 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2311 item, "Not supported last point for range");
2314 rule->b_mask = TRUE;
2316 /* Tunnel type is always meaningful. */
2317 rule->mask.tunnel_type_mask = 1;
2320 (const struct rte_flow_item_nvgre *)item->mask;
2321 if (nvgre_mask->flow_id) {
2322 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2323 rte_flow_error_set(error, EINVAL,
2324 RTE_FLOW_ERROR_TYPE_ITEM,
2325 item, "Not supported by fdir filter");
2328 if (nvgre_mask->c_k_s_rsvd0_ver !=
2329 rte_cpu_to_be_16(0x3000) ||
2330 nvgre_mask->protocol != 0xFFFF) {
2331 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2332 rte_flow_error_set(error, EINVAL,
2333 RTE_FLOW_ERROR_TYPE_ITEM,
2334 item, "Not supported by fdir filter");
2337 /* The TNI must be either fully masked or not masked at all. */
2338 if (nvgre_mask->tni[0] &&
2339 ((nvgre_mask->tni[0] != 0xFF) ||
2340 (nvgre_mask->tni[1] != 0xFF) ||
2341 (nvgre_mask->tni[2] != 0xFF))) {
2342 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2343 rte_flow_error_set(error, EINVAL,
2344 RTE_FLOW_ERROR_TYPE_ITEM,
2345 item, "Not supported by fdir filter");
2348 /* TNI is a 24-bit field */
2349 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2350 RTE_DIM(nvgre_mask->tni));
2351 rule->mask.tunnel_id_mask <<= 8;
2354 rule->b_spec = TRUE;
2356 (const struct rte_flow_item_nvgre *)item->spec;
2357 if (nvgre_spec->c_k_s_rsvd0_ver !=
2358 rte_cpu_to_be_16(0x2000) ||
2359 nvgre_spec->protocol !=
2360 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2361 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2362 rte_flow_error_set(error, EINVAL,
2363 RTE_FLOW_ERROR_TYPE_ITEM,
2364 item, "Not supported by fdir filter");
2367 /* TNI is a 24-bit field */
2368 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2369 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2370 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
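/*
 * Like the mask above, the 24-bit TNI is shifted left by 8 so that the low
 * byte of the 32-bit tunnel id stays clear.
 */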
2374 /* check if the next not void item is MAC */
2375 item = next_no_void_pattern(pattern, item);
2376 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2377 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378 rte_flow_error_set(error, EINVAL,
2379 RTE_FLOW_ERROR_TYPE_ITEM,
2380 item, "Not supported by fdir filter");
2385 * Only the VLAN and destination MAC address are supported;
2386 * other fields should be masked out.
2390 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2391 rte_flow_error_set(error, EINVAL,
2392 RTE_FLOW_ERROR_TYPE_ITEM,
2393 item, "Not supported by fdir filter");
2396 /*Not supported last point for range*/
2398 rte_flow_error_set(error, EINVAL,
2399 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2400 item, "Not supported last point for range");
2403 rule->b_mask = TRUE;
2404 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2406 /* Ether type should be masked. */
2407 if (eth_mask->type) {
2408 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2409 rte_flow_error_set(error, EINVAL,
2410 RTE_FLOW_ERROR_TYPE_ITEM,
2411 item, "Not supported by fdir filter");
2415 /* src MAC address should be masked. */
2416 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2417 if (eth_mask->src.addr_bytes[j]) {
2419 sizeof(struct ixgbe_fdir_rule));
2420 rte_flow_error_set(error, EINVAL,
2421 RTE_FLOW_ERROR_TYPE_ITEM,
2422 item, "Not supported by fdir filter");
2426 rule->mask.mac_addr_byte_mask = 0;
2427 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2428 /* It's a per byte mask. */
2429 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2430 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2431 } else if (eth_mask->dst.addr_bytes[j]) {
2432 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2433 rte_flow_error_set(error, EINVAL,
2434 RTE_FLOW_ERROR_TYPE_ITEM,
2435 item, "Not supported by fdir filter");
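/*
 * Example: a fully specified destination MAC (all six mask bytes 0xFF)
 * yields mac_addr_byte_mask == 0x3F, i.e. one mask bit per address byte.
 */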
2440 /* When there is no VLAN, treat the TCI as fully masked. */
2441 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
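/*
 * 0xEFFF covers the PCP and VID bits of the TCI; only the CFI/DEI bit is
 * excluded from the match.
 */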
2444 rule->b_spec = TRUE;
2445 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2447 /* Get the dst MAC. */
2448 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2449 rule->ixgbe_fdir.formatted.inner_mac[j] =
2450 eth_spec->dst.addr_bytes[j];
2455 * Check if the next not void item is vlan or ipv4.
2456 * IPv6 is not supported.
2458 item = next_no_void_pattern(pattern, item);
2459 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2460 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2461 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2462 rte_flow_error_set(error, EINVAL,
2463 RTE_FLOW_ERROR_TYPE_ITEM,
2464 item, "Not supported by fdir filter");
2467 /*Not supported last point for range*/
2469 rte_flow_error_set(error, EINVAL,
2470 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2471 item, "Not supported last point for range");
2475 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2476 if (!(item->spec && item->mask)) {
2477 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2478 rte_flow_error_set(error, EINVAL,
2479 RTE_FLOW_ERROR_TYPE_ITEM,
2480 item, "Not supported by fdir filter");
2484 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2485 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2487 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2489 rule->mask.vlan_tci_mask = vlan_mask->tci;
2490 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2491 /* More than one VLAN tag is not supported. */
2493 /* check if the next not void item is END */
2494 item = next_no_void_pattern(pattern, item);
2496 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2497 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2498 rte_flow_error_set(error, EINVAL,
2499 RTE_FLOW_ERROR_TYPE_ITEM,
2500 item, "Not supported by fdir filter");
2506 * If the tag is 0, it means we don't care about the VLAN.
2510 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
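/*
 * Illustrative sketch only (not part of the driver): one way an application
 * could express the VxLAN flow director rule described above through the
 * generic rte_flow API. The function name, queue index, inner MAC, VNI and
 * VLAN TCI values are made-up examples, and the rte_flow structure layouts
 * assumed here follow this DPDK generation; other releases may differ.
 */
static int __rte_unused
ixgbe_flow_doc_example_vxlan_fdir(uint8_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };

	/* Tunnel VNI, fully masked as required by the parser above. */
	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };

	/* Inner destination MAC, fully masked per byte. */
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};

	/* Inner VLAN TCI, masked to the PCP and VID bits. */
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(0x2016),
	};
	struct rte_flow_item_vlan vlan_mask = {
		.tci = rte_cpu_to_be_16(0xEFFF),
	};

	/* Outer ETH/IPV4/UDP only describe the protocol stack: no spec/mask. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	/* Send matching packets to an example queue. */
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Validation exercises ixgbe_parse_fdir_filter_tunnel() above. */
	return rte_flow_validate(port_id, &attr, pattern, actions, err);
}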
2514 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2515 const struct rte_flow_attr *attr,
2516 const struct rte_flow_item pattern[],
2517 const struct rte_flow_action actions[],
2518 struct ixgbe_fdir_rule *rule,
2519 struct rte_flow_error *error)
2522 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2523 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
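/*
 * Flow director filters are only implemented on the 82599, X540 and X550
 * MAC families; other devices cannot use fdir rules.
 */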
2525 if (hw->mac.type != ixgbe_mac_82599EB &&
2526 hw->mac.type != ixgbe_mac_X540 &&
2527 hw->mac.type != ixgbe_mac_X550 &&
2528 hw->mac.type != ixgbe_mac_X550EM_x &&
2529 hw->mac.type != ixgbe_mac_X550EM_a)
2532 ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2533 actions, rule, error);
2538 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2539 actions, rule, error);
2542 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2543 fdir_mode != rule->mode)
2549 ixgbe_filterlist_flush(void)
2551 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2552 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2553 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2554 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2555 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2556 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2558 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2559 TAILQ_REMOVE(&filter_ntuple_list,
2562 rte_free(ntuple_filter_ptr);
2565 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2566 TAILQ_REMOVE(&filter_ethertype_list,
2567 ethertype_filter_ptr,
2569 rte_free(ethertype_filter_ptr);
2572 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2573 TAILQ_REMOVE(&filter_syn_list,
2576 rte_free(syn_filter_ptr);
2579 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2580 TAILQ_REMOVE(&filter_l2_tunnel_list,
2583 rte_free(l2_tn_filter_ptr);
2586 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2587 TAILQ_REMOVE(&filter_fdir_list,
2590 rte_free(fdir_rule_ptr);
2593 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2594 TAILQ_REMOVE(&ixgbe_flow_list,
2597 rte_free(ixgbe_flow_mem_ptr->flow);
2598 rte_free(ixgbe_flow_mem_ptr);
2603 * Create or destroy a flow rule.
2604 * Theoretically one rule can match more than one filter type.
2605 * The first filter type that matches is the one that is used,
2606 * so the order of the checks below matters.
2608 static struct rte_flow *
2609 ixgbe_flow_create(struct rte_eth_dev *dev,
2610 const struct rte_flow_attr *attr,
2611 const struct rte_flow_item pattern[],
2612 const struct rte_flow_action actions[],
2613 struct rte_flow_error *error)
2616 struct rte_eth_ntuple_filter ntuple_filter;
2617 struct rte_eth_ethertype_filter ethertype_filter;
2618 struct rte_eth_syn_filter syn_filter;
2619 struct ixgbe_fdir_rule fdir_rule;
2620 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2621 struct ixgbe_hw_fdir_info *fdir_info =
2622 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2623 struct rte_flow *flow = NULL;
2624 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2625 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2626 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2627 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2628 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2629 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2631 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2633 PMD_DRV_LOG(ERR, "failed to allocate memory");
2634 return (struct rte_flow *)flow;
2636 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2637 sizeof(struct ixgbe_flow_mem), 0);
2638 if (!ixgbe_flow_mem_ptr) {
2639 PMD_DRV_LOG(ERR, "failed to allocate memory");
2643 ixgbe_flow_mem_ptr->flow = flow;
2644 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2645 ixgbe_flow_mem_ptr, entries);
2647 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2648 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2649 actions, &ntuple_filter, error);
2651 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2653 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2654 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2655 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2657 sizeof(struct rte_eth_ntuple_filter));
2658 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2659 ntuple_filter_ptr, entries);
2660 flow->rule = ntuple_filter_ptr;
2661 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2667 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2668 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2669 actions, ðertype_filter, error);
2671 ret = ixgbe_add_del_ethertype_filter(dev,
2672 ðertype_filter, TRUE);
2674 ethertype_filter_ptr = rte_zmalloc(
2675 "ixgbe_ethertype_filter",
2676 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2677 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2679 sizeof(struct rte_eth_ethertype_filter));
2680 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2681 ethertype_filter_ptr, entries);
2682 flow->rule = ethertype_filter_ptr;
2683 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2689 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2690 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2691 actions, &syn_filter, error);
2693 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2695 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2696 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2697 (void)rte_memcpy(&syn_filter_ptr->filter_info,
2699 sizeof(struct rte_eth_syn_filter));
2700 TAILQ_INSERT_TAIL(&filter_syn_list,
2703 flow->rule = syn_filter_ptr;
2704 flow->filter_type = RTE_ETH_FILTER_SYN;
2710 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2711 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2712 actions, &fdir_rule, error);
2714 /* A mask cannot be deleted. */
2715 if (fdir_rule.b_mask) {
2716 if (!fdir_info->mask_added) {
2717 /* It's the first time the mask is set. */
2718 rte_memcpy(&fdir_info->mask,
2720 sizeof(struct ixgbe_hw_fdir_mask));
2721 fdir_info->flex_bytes_offset =
2722 fdir_rule.flex_bytes_offset;
2724 if (fdir_rule.mask.flex_bytes_mask)
2725 ixgbe_fdir_set_flexbytes_offset(dev,
2726 fdir_rule.flex_bytes_offset);
2728 ret = ixgbe_fdir_set_input_mask(dev);
2732 fdir_info->mask_added = TRUE;
2735 * Only one global mask is supported,
2736 * so every rule must use the same mask.
2738 ret = memcmp(&fdir_info->mask,
2740 sizeof(struct ixgbe_hw_fdir_mask));
2744 if (fdir_info->flex_bytes_offset !=
2745 fdir_rule.flex_bytes_offset)
2750 if (fdir_rule.b_spec) {
2751 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2754 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2755 sizeof(struct ixgbe_fdir_rule_ele), 0);
2756 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2758 sizeof(struct ixgbe_fdir_rule));
2759 TAILQ_INSERT_TAIL(&filter_fdir_list,
2760 fdir_rule_ptr, entries);
2761 flow->rule = fdir_rule_ptr;
2762 flow->filter_type = RTE_ETH_FILTER_FDIR;
2774 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2775 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2776 actions, &l2_tn_filter, error);
2778 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2780 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2781 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2782 (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2784 sizeof(struct rte_eth_l2_tunnel_conf));
2785 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2786 l2_tn_filter_ptr, entries);
2787 flow->rule = l2_tn_filter_ptr;
2788 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2794 TAILQ_REMOVE(&ixgbe_flow_list,
2795 ixgbe_flow_mem_ptr, entries);
2796 rte_flow_error_set(error, -ret,
2797 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2798 "Failed to create flow.");
2799 rte_free(ixgbe_flow_mem_ptr);
2805 * Check if the flow rule is supported by ixgbe.
2806 * Only the format is checked; there is no guarantee that the rule can be
2807 * programmed into the HW, as there may not be enough room left for it.
2810 ixgbe_flow_validate(struct rte_eth_dev *dev,
2811 const struct rte_flow_attr *attr,
2812 const struct rte_flow_item pattern[],
2813 const struct rte_flow_action actions[],
2814 struct rte_flow_error *error)
2816 struct rte_eth_ntuple_filter ntuple_filter;
2817 struct rte_eth_ethertype_filter ethertype_filter;
2818 struct rte_eth_syn_filter syn_filter;
2819 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2820 struct ixgbe_fdir_rule fdir_rule;
2823 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2824 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2825 actions, &ntuple_filter, error);
2829 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2830 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2831 actions, ðertype_filter, error);
2835 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2836 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2837 actions, &syn_filter, error);
2841 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2842 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2843 actions, &fdir_rule, error);
2847 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2848 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2849 actions, &l2_tn_filter, error);
2854 /* Destroy a flow rule on ixgbe. */
2856 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2857 struct rte_flow *flow,
2858 struct rte_flow_error *error)
2861 struct rte_flow *pmd_flow = flow;
2862 enum rte_filter_type filter_type = pmd_flow->filter_type;
2863 struct rte_eth_ntuple_filter ntuple_filter;
2864 struct rte_eth_ethertype_filter ethertype_filter;
2865 struct rte_eth_syn_filter syn_filter;
2866 struct ixgbe_fdir_rule fdir_rule;
2867 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2868 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2869 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2870 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2871 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2872 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2873 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2874 struct ixgbe_hw_fdir_info *fdir_info =
2875 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2877 switch (filter_type) {
2878 case RTE_ETH_FILTER_NTUPLE:
2879 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2881 (void)rte_memcpy(&ntuple_filter,
2882 &ntuple_filter_ptr->filter_info,
2883 sizeof(struct rte_eth_ntuple_filter));
2884 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2886 TAILQ_REMOVE(&filter_ntuple_list,
2887 ntuple_filter_ptr, entries);
2888 rte_free(ntuple_filter_ptr);
2891 case RTE_ETH_FILTER_ETHERTYPE:
2892 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2894 (void)rte_memcpy(ðertype_filter,
2895 ðertype_filter_ptr->filter_info,
2896 sizeof(struct rte_eth_ethertype_filter));
2897 ret = ixgbe_add_del_ethertype_filter(dev,
2898 ðertype_filter, FALSE);
2900 TAILQ_REMOVE(&filter_ethertype_list,
2901 ethertype_filter_ptr, entries);
2902 rte_free(ethertype_filter_ptr);
2905 case RTE_ETH_FILTER_SYN:
2906 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2908 (void)rte_memcpy(&syn_filter,
2909 &syn_filter_ptr->filter_info,
2910 sizeof(struct rte_eth_syn_filter));
2911 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2913 TAILQ_REMOVE(&filter_syn_list,
2914 syn_filter_ptr, entries);
2915 rte_free(syn_filter_ptr);
2918 case RTE_ETH_FILTER_FDIR:
2919 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2920 (void)rte_memcpy(&fdir_rule,
2921 &fdir_rule_ptr->filter_info,
2922 sizeof(struct ixgbe_fdir_rule));
2923 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2925 TAILQ_REMOVE(&filter_fdir_list,
2926 fdir_rule_ptr, entries);
2927 rte_free(fdir_rule_ptr);
2928 if (TAILQ_EMPTY(&filter_fdir_list))
2929 fdir_info->mask_added = false;
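/*
 * No flow director filters are left, so a different global mask may be
 * accepted from the next rule that is created.
 */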
2932 case RTE_ETH_FILTER_L2_TUNNEL:
2933 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2935 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2936 sizeof(struct rte_eth_l2_tunnel_conf));
2937 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2939 TAILQ_REMOVE(&filter_l2_tunnel_list,
2940 l2_tn_filter_ptr, entries);
2941 rte_free(l2_tn_filter_ptr);
2945 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2952 rte_flow_error_set(error, EINVAL,
2953 RTE_FLOW_ERROR_TYPE_HANDLE,
2954 NULL, "Failed to destroy flow");
2958 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2959 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2960 TAILQ_REMOVE(&ixgbe_flow_list,
2961 ixgbe_flow_mem_ptr, entries);
2962 rte_free(ixgbe_flow_mem_ptr);
2970 /* Destroy all flow rules associated with a port on ixgbe. */
2972 ixgbe_flow_flush(struct rte_eth_dev *dev,
2973 struct rte_flow_error *error)
2977 ixgbe_clear_all_ntuple_filter(dev);
2978 ixgbe_clear_all_ethertype_filter(dev);
2979 ixgbe_clear_syn_filter(dev);
2981 ret = ixgbe_clear_all_fdir_filter(dev);
2983 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2984 NULL, "Failed to flush rule");
2988 ret = ixgbe_clear_all_l2_tn_filter(dev);
2990 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2991 NULL, "Failed to flush rule");
2995 ixgbe_filterlist_flush();
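/*
 * These ops are handed back through the RTE_ETH_FILTER_GENERIC filter_ctrl
 * query, so generic rte_flow calls on an ixgbe port are dispatched to the
 * handlers above.
 */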
3000 const struct rte_flow_ops ixgbe_flow_ops = {
3001 .validate = ixgbe_flow_validate,
3002 .create = ixgbe_flow_create,
3003 .destroy = ixgbe_flow_destroy,
3004 .flush = ixgbe_flow_flush,