4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
62 #include <rte_hash_crc.h>
64 #include <rte_flow_driver.h>
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
/* Valid priority range accepted for n-tuple filters, and the maximum
 * byte offset usable as a flex-bytes source in a flow-director rule.
 */
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
83 * Endless loop will never happen with below assumption
84 * 1. there is at least one no-void item(END)
85 * 2. cur is before END.
88 const struct rte_flow_item *next_no_void_pattern(
89 const struct rte_flow_item pattern[],
90 const struct rte_flow_item *cur)
92 const struct rte_flow_item *next =
93 cur ? cur + 1 : &pattern[0];
95 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
102 const struct rte_flow_action *next_no_void_action(
103 const struct rte_flow_action actions[],
104 const struct rte_flow_action *cur)
106 const struct rte_flow_action *next =
107 cur ? cur + 1 : &actions[0];
109 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
/*
 * Parse the rule to see if it is a n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example (spec / mask):
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * NOTE(review): this extract is missing source lines (the
 * "if (!pattern) / if (!actions) / if (!attr)" guards before the
 * first three error paths, "return -rte_errno;" statements,
 * "if (item->last)" range checks and several closing braces).
 * The code statements below are kept verbatim; verify control flow
 * against the complete file before relying on it.
 */
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	/* error paths for NULL pattern / NULL actions / NULL attr */
	rte_flow_error_set(error,
		EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
		NULL, "NULL pattern.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ACTION_NUM,
		NULL, "NULL action.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ATTR,
		NULL, "NULL attribute.");

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		/* NOTE(review): the EINVAL argument line is missing here
		 * in this extract. */
		rte_flow_error_set(error,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/*Not supported last point for range*/
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/*
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	/* addresses/protocol are kept in the item's (big-endian) order */
	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/*Not supported last point for range*/
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		/*
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			/* NOTE(review): the "memset(filter, 0," line is
			 * missing above this continuation in the extract. */
			sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		/* tcp_flags mask must be all-ones or all-zero; anything
		 * partial is rejected. */
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		/*
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
		/* SCTP branch (the "} else {" line is missing in extract) */
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
		/*
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	/*
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
			/* NOTE(review): 'item' above should probably be
			 * 'act' -- the failing object is the action. */
		/* "filter->queue =" line is missing before this
		 * continuation in the extract */
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

	/* egress rejection path ("if (attr->egress)" guard missing
	 * from extract) */
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
	/* out-of-range priorities are clamped to 1 rather than rejected */
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;
/* a specific function for ixgbe because the flags is specific */
/*
 * ixgbe wrapper around cons_parse_ntuple_filter(): runs the common
 * parse, then rejects features ixgbe hardware cannot do (tcp flags,
 * out-of-range priority/queue) and forces the 5-tuple flag set.
 *
 * NOTE(review): extract is missing the "int ret;" declaration, the
 * "if (ret) return ret;" after the parse, the "return -rte_errno;"
 * error returns and closing braces; statements kept verbatim.
 */
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	/* bail out (via macro) on MAC types without extended filtering */
	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		/* NOTE(review): the statement body of this check is
		 * missing from the extract. */

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
501 * Parse the rule to see if it is a ethertype rule.
502 * And get the ethertype filter info BTW.
504 * The first not void item can be ETH.
505 * The next not void item must be END.
507 * The first not void action should be QUEUE.
508 * The next not void action should be END.
511 * ETH type 0x0807 0xFFFF
513 * other members in mask and spec should set to 0x00.
514 * item->last should be NULL.
517 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
518 const struct rte_flow_item *pattern,
519 const struct rte_flow_action *actions,
520 struct rte_eth_ethertype_filter *filter,
521 struct rte_flow_error *error)
523 const struct rte_flow_item *item;
524 const struct rte_flow_action *act;
525 const struct rte_flow_item_eth *eth_spec;
526 const struct rte_flow_item_eth *eth_mask;
527 const struct rte_flow_action_queue *act_q;
530 rte_flow_error_set(error, EINVAL,
531 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
532 NULL, "NULL pattern.");
537 rte_flow_error_set(error, EINVAL,
538 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
539 NULL, "NULL action.");
544 rte_flow_error_set(error, EINVAL,
545 RTE_FLOW_ERROR_TYPE_ATTR,
546 NULL, "NULL attribute.");
550 item = next_no_void_pattern(pattern, NULL);
551 /* The first non-void item should be MAC. */
552 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
553 rte_flow_error_set(error, EINVAL,
554 RTE_FLOW_ERROR_TYPE_ITEM,
555 item, "Not supported by ethertype filter");
559 /*Not supported last point for range*/
561 rte_flow_error_set(error, EINVAL,
562 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
563 item, "Not supported last point for range");
567 /* Get the MAC info. */
568 if (!item->spec || !item->mask) {
569 rte_flow_error_set(error, EINVAL,
570 RTE_FLOW_ERROR_TYPE_ITEM,
571 item, "Not supported by ethertype filter");
575 eth_spec = (const struct rte_flow_item_eth *)item->spec;
576 eth_mask = (const struct rte_flow_item_eth *)item->mask;
578 /* Mask bits of source MAC address must be full of 0.
579 * Mask bits of destination MAC address must be full
582 if (!is_zero_ether_addr(ð_mask->src) ||
583 (!is_zero_ether_addr(ð_mask->dst) &&
584 !is_broadcast_ether_addr(ð_mask->dst))) {
585 rte_flow_error_set(error, EINVAL,
586 RTE_FLOW_ERROR_TYPE_ITEM,
587 item, "Invalid ether address mask");
591 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
592 rte_flow_error_set(error, EINVAL,
593 RTE_FLOW_ERROR_TYPE_ITEM,
594 item, "Invalid ethertype mask");
598 /* If mask bits of destination MAC address
599 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
601 if (is_broadcast_ether_addr(ð_mask->dst)) {
602 filter->mac_addr = eth_spec->dst;
603 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
605 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
607 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
609 /* Check if the next non-void item is END. */
610 item = next_no_void_pattern(pattern, item);
611 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM,
614 item, "Not supported by ethertype filter.");
620 act = next_no_void_action(actions, NULL);
621 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
622 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
623 rte_flow_error_set(error, EINVAL,
624 RTE_FLOW_ERROR_TYPE_ACTION,
625 act, "Not supported action.");
629 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
630 act_q = (const struct rte_flow_action_queue *)act->conf;
631 filter->queue = act_q->index;
633 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
636 /* Check if the next non-void item is END */
637 act = next_no_void_action(actions, act);
638 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
639 rte_flow_error_set(error, EINVAL,
640 RTE_FLOW_ERROR_TYPE_ACTION,
641 act, "Not supported action.");
646 /* Must be input direction */
647 if (!attr->ingress) {
648 rte_flow_error_set(error, EINVAL,
649 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
650 attr, "Only support ingress.");
656 rte_flow_error_set(error, EINVAL,
657 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
658 attr, "Not support egress.");
663 if (attr->priority) {
664 rte_flow_error_set(error, EINVAL,
665 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
666 attr, "Not support priority.");
672 rte_flow_error_set(error, EINVAL,
673 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
674 attr, "Not support group.");
/*
 * ixgbe wrapper around cons_parse_ethertype_filter(): runs the common
 * parse, then rejects what the hardware cannot do (MAC compare, big
 * queue index, IPv4/IPv6 ethertypes, drop action).
 *
 * NOTE(review): extract is missing "int ret;", "if (ret) return
 * ret;", the "return -rte_errno;" statements and closing braces;
 * statements kept verbatim.
 */
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");

	/* NOTE(review): this duplicates the RTE_ETHTYPE_FLAGS_MAC check
	 * above -- only the error message differs. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
/*
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example (spec / mask):
 * IPV4/IPV6	NULL		NULL
 * TCP		tcp_flags 0x02	0xFF
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * NOTE(review): extract is missing the NULL-argument guards,
 * "return -rte_errno;" statements, "if (item->last)" checks and
 * closing braces; statements kept verbatim.
 */
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	/* error paths for NULL pattern / NULL actions / NULL attr */
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ITEM_NUM,
		NULL, "NULL pattern.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ACTION_NUM,
		NULL, "NULL action.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ATTR,
		NULL, "NULL attribute.");

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");

	/*Not supported last point for range*/
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");

	/*Not supported last point for range*/
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	/* spec must have SYN set and mask must match exactly SYN only;
	 * all other TCP header fields must be unmasked. */
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

	/* egress rejection path (guard line missing from extract) */
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		/* lowest-priority branch body missing from extract */
	} else if (attr->priority == (uint32_t)~0U) {
		/* highest-priority branch; the final else rejects
		 * anything in between. */
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
/*
 * ixgbe wrapper around cons_parse_syn_filter(): verifies the MAC type
 * supports filtering, then delegates to the common parser.
 *
 * NOTE(review): extract is missing "int ret;" and the trailing
 * "return ret;" / closing brace; statements kept verbatim.
 */
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);
/*
 * Parse the rule to see if it is a L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * actions:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example (spec / mask):
 * E_TAG	e_cid_base 0x309	0xFFF
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * NOTE(review): extract is missing the NULL-argument guards,
 * "return -rte_errno;" statements, "if (item->last)" check and
 * closing braces; statements kept verbatim.
 */
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	/* error paths for NULL pattern / NULL actions / NULL attr */
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ITEM_NUM,
		NULL, "NULL pattern.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ACTION_NUM,
		NULL, "NULL action.");
	rte_flow_error_set(error, EINVAL,
		RTE_FLOW_ERROR_TYPE_ATTR,
		NULL, "NULL attribute.");

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	/*Not supported last point for range*/
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/*
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

	/* egress rejection path (guard line missing from extract) */
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* the queue index is stored in the filter's pool field */
	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
/*
 * ixgbe wrapper around cons_parse_l2_tn_filter(): delegates to the
 * common parser, then rejects the rule on MAC types other than the
 * X550 family (the only ixgbe MACs with E-tag support here).
 *
 * NOTE(review): extract is missing "int ret;", the "return
 * -rte_errno;" / "return ret;" statements and closing braces;
 * statements kept verbatim.
 */
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
/* Parse to get the attr and action info of flow director rule. */
/*
 * Validates attr (ingress only, no priority) and the action list
 * (QUEUE or DROP, optionally followed by MARK, then END) for a flow
 * director rule, filling rule->queue / rule->fdirflags / rule->soft_id.
 *
 * NOTE(review): extract is missing the "if (attr->egress)" guard,
 * "return -rte_errno;" statements and several closing braces;
 * statements kept verbatim.
 */
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

	/* egress rejection path (guard line missing from extract) */
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
		/* DROP branch ("} else {" line missing from extract) */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		rule->fdirflags = IXGBE_FDIRCMD_DROP;

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* MARK is optional; consume it and advance to what follows */
	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
/*
 * Return the next pattern item that is neither VOID nor FUZZY, starting
 * after @cur (or from the beginning when @cur is NULL).  VOID skipping is
 * delegated to next_no_void_pattern(); on a FUZZY hit it advances once
 * more.
 * NOTE(review): fragmentary listing — the `static` qualifier on the
 * definition, the loop/return lines and the closing brace are elided.
 * Verify control flow against the full source.
 */
1278 /* search next no void pattern and skip fuzzy */
1280 const struct rte_flow_item *next_no_fuzzy_pattern(
1281 const struct rte_flow_item pattern[],
1282 const struct rte_flow_item *cur)
1284 const struct rte_flow_item *next =
1285 next_no_void_pattern(pattern, cur);
1287 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1289 next = next_no_void_pattern(pattern, next);
/*
 * Detect whether @pattern contains a FUZZY item, which selects
 * signature-match mode for the flow director.  Scans items until END and,
 * on a FUZZY item, reads its spec/last/mask thresholds (sh/lh/mh).
 * NOTE(review): fragmentary listing — the scan loop, the sh/lh/mh
 * assignments/comparison and both return statements are elided; the
 * non-zero return presumably means "use signature mode" — verify against
 * the full source.
 */
1293 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1295 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1296 const struct rte_flow_item *item;
1297 uint32_t sh, lh, mh;
1302 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1305 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1307 (const struct rte_flow_item_fuzzy *)item->spec;
1309 (const struct rte_flow_item_fuzzy *)item->last;
1311 (const struct rte_flow_item_fuzzy *)item->mask;
1340 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1341 * And get the flow director filter info as well.
1342 * UDP/TCP/SCTP PATTERN:
1343 * The first not void item can be ETH or IPV4 or IPV6
1344 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1345 * The next not void item could be UDP or TCP or SCTP (optional)
1346 * The next not void item could be RAW (for flexbyte, optional)
1347 * The next not void item must be END.
1348 * A Fuzzy Match pattern can appear at any place before END.
1349 * Fuzzy Match is optional for IPV4 but is required for IPV6
1351 * The first not void item must be ETH.
1352 * The second not void item must be MAC VLAN.
1353 * The next not void item must be END.
1355 * The first not void action should be QUEUE or DROP.
1356 * The second not void optional action should be MARK,
1357 * mark_id is a uint32_t number.
1358 * The next not void action should be END.
1359 * UDP/TCP/SCTP pattern example:
1362 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1363 * dst_addr 192.167.3.50 0xFFFFFFFF
1364 * UDP/TCP/SCTP src_port 80 0xFFFF
1365 * dst_port 80 0xFFFF
1366 * FLEX relative 0 0x1
1369 * offset 12 0xFFFFFFFF
1372 * pattern[0] 0x86 0xFF
1373 * pattern[1] 0xDD 0xFF
1375 * MAC VLAN pattern example:
1378 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1379 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1380 * MAC VLAN tci 0x2016 0xEFFF
1382 * Other members in mask and spec should set to 0x00.
1383 * Item->last should be NULL.
/*
 * Parse an rte_flow rule into an IP / MAC-VLAN flow director filter
 * (struct ixgbe_fdir_rule).  Walks the pattern with
 * next_no_fuzzy_pattern(): optional ETH (dst MAC + VLAN only -> MAC VLAN
 * perfect mode), then IPv4 or IPv6 (IPv6 requires signature mode via a
 * FUZZY item), optional TCP/UDP/SCTP (src/dst ports only), optional RAW
 * (one 2-byte flex word at an even offset), then END.  Masks are collected
 * into rule->mask and specs into rule->ixgbe_fdir.formatted.  Any
 * violation zeroes @rule and reports EINVAL through rte_flow_error_set().
 * Finally delegates attr/action parsing to ixgbe_parse_fdir_act_attr().
 * NOTE(review): fragmentary listing — each line keeps its original line
 * number; many lines (returns, closing braces, some `if` conditions) are
 * elided.  Do not treat this excerpt as compilable; verify any elided
 * control flow against the full source.
 */
1386 ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
1387 const struct rte_flow_item pattern[],
1388 const struct rte_flow_action actions[],
1389 struct ixgbe_fdir_rule *rule,
1390 struct rte_flow_error *error)
1392 const struct rte_flow_item *item;
1393 const struct rte_flow_item_eth *eth_spec;
1394 const struct rte_flow_item_eth *eth_mask;
1395 const struct rte_flow_item_ipv4 *ipv4_spec;
1396 const struct rte_flow_item_ipv4 *ipv4_mask;
1397 const struct rte_flow_item_ipv6 *ipv6_spec;
1398 const struct rte_flow_item_ipv6 *ipv6_mask;
1399 const struct rte_flow_item_tcp *tcp_spec;
1400 const struct rte_flow_item_tcp *tcp_mask;
1401 const struct rte_flow_item_udp *udp_spec;
1402 const struct rte_flow_item_udp *udp_mask;
1403 const struct rte_flow_item_sctp *sctp_spec;
1404 const struct rte_flow_item_sctp *sctp_mask;
1405 const struct rte_flow_item_vlan *vlan_spec;
1406 const struct rte_flow_item_vlan *vlan_mask;
1407 const struct rte_flow_item_raw *raw_mask;
1408 const struct rte_flow_item_raw *raw_spec;
/* NOTE(review): the NULL checks guarding these three error paths
 * (`if (!pattern)`, `if (!actions)`, `if (!attr)`) are elided here. */
1413 rte_flow_error_set(error, EINVAL,
1414 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1415 NULL, "NULL pattern.");
1420 rte_flow_error_set(error, EINVAL,
1421 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1422 NULL, "NULL action.");
1427 rte_flow_error_set(error, EINVAL,
1428 RTE_FLOW_ERROR_TYPE_ATTR,
1429 NULL, "NULL attribute.");
1434 * Some fields may not be provided. Set spec to 0 and mask to default
1435 * value. So, we need not do anything for the not provided fields later.
1437 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1438 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1439 rule->mask.vlan_tci_mask = 0;
1440 rule->mask.flex_bytes_mask = 0;
1443 * The first not void item should be
1444 * MAC or IPv4 or TCP or UDP or SCTP.
1446 item = next_no_fuzzy_pattern(pattern, NULL);
1447 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1448 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1449 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1450 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1451 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1452 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1453 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1454 rte_flow_error_set(error, EINVAL,
1455 RTE_FLOW_ERROR_TYPE_ITEM,
1456 item, "Not supported by fdir filter");
/* A FUZZY item anywhere in the pattern selects signature mode. */
1460 if (signature_match(pattern))
1461 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1463 rule->mode = RTE_FDIR_MODE_PERFECT;
1465 /*Not supported last point for range*/
1467 rte_flow_error_set(error, EINVAL,
1468 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1469 item, "Not supported last point for range");
1473 /* Get the MAC info. */
1474 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1476 * Only support vlan and dst MAC address,
1477 * others should be masked.
1479 if (item->spec && !item->mask) {
1480 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1481 rte_flow_error_set(error, EINVAL,
1482 RTE_FLOW_ERROR_TYPE_ITEM,
1483 item, "Not supported by fdir filter");
1488 rule->b_spec = TRUE;
1489 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1491 /* Get the dst MAC. */
1492 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1493 rule->ixgbe_fdir.formatted.inner_mac[j] =
1494 eth_spec->dst.addr_bytes[j];
1501 rule->b_mask = TRUE;
1502 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1504 /* Ether type should be masked. */
1505 if (eth_mask->type ||
1506 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1507 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1508 rte_flow_error_set(error, EINVAL,
1509 RTE_FLOW_ERROR_TYPE_ITEM,
1510 item, "Not supported by fdir filter");
1514 /* If ethernet has meaning, it means MAC VLAN mode. */
1515 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1518 * src MAC address must be masked,
1519 * and don't support dst MAC address mask.
1521 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1522 if (eth_mask->src.addr_bytes[j] ||
1523 eth_mask->dst.addr_bytes[j] != 0xFF) {
1525 sizeof(struct ixgbe_fdir_rule));
1526 rte_flow_error_set(error, EINVAL,
1527 RTE_FLOW_ERROR_TYPE_ITEM,
1528 item, "Not supported by fdir filter");
1533 /* When no VLAN, considered as full mask. */
1534 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1536 /* If both spec and mask are NULL,
1537 * it means don't care about ETH.
1542 * Check if the next not void item is vlan or ipv4.
1543 * IPv6 is not supported.
1545 item = next_no_fuzzy_pattern(pattern, item);
1546 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1547 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1548 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1549 rte_flow_error_set(error, EINVAL,
1550 RTE_FLOW_ERROR_TYPE_ITEM,
1551 item, "Not supported by fdir filter");
1555 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1556 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1557 rte_flow_error_set(error, EINVAL,
1558 RTE_FLOW_ERROR_TYPE_ITEM,
1559 item, "Not supported by fdir filter");
/* Get the VLAN info (MAC VLAN mode requires both spec and mask). */
1565 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1566 if (!(item->spec && item->mask)) {
1567 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1568 rte_flow_error_set(error, EINVAL,
1569 RTE_FLOW_ERROR_TYPE_ITEM,
1570 item, "Not supported by fdir filter");
1574 /*Not supported last point for range*/
1576 rte_flow_error_set(error, EINVAL,
1577 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1578 item, "Not supported last point for range");
1582 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1583 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1585 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1587 rule->mask.vlan_tci_mask = vlan_mask->tci;
1588 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1589 /* More than one tag is not supported. */
1591 /* Next not void item must be END */
1592 item = next_no_fuzzy_pattern(pattern, item);
1593 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1594 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1595 rte_flow_error_set(error, EINVAL,
1596 RTE_FLOW_ERROR_TYPE_ITEM,
1597 item, "Not supported by fdir filter");
1602 /* Get the IPV4 info. */
1603 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1605 * Set the flow type even if there's no content
1606 * as we must have a flow type.
1608 rule->ixgbe_fdir.formatted.flow_type =
1609 IXGBE_ATR_FLOW_TYPE_IPV4;
1610 /*Not supported last point for range*/
1612 rte_flow_error_set(error, EINVAL,
1613 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1614 item, "Not supported last point for range");
1618 * Only care about src & dst addresses,
1619 * others should be masked.
1622 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1623 rte_flow_error_set(error, EINVAL,
1624 RTE_FLOW_ERROR_TYPE_ITEM,
1625 item, "Not supported by fdir filter");
1628 rule->b_mask = TRUE;
1630 (const struct rte_flow_item_ipv4 *)item->mask;
/* All IPv4 header fields other than src/dst address must be 0. */
1631 if (ipv4_mask->hdr.version_ihl ||
1632 ipv4_mask->hdr.type_of_service ||
1633 ipv4_mask->hdr.total_length ||
1634 ipv4_mask->hdr.packet_id ||
1635 ipv4_mask->hdr.fragment_offset ||
1636 ipv4_mask->hdr.time_to_live ||
1637 ipv4_mask->hdr.next_proto_id ||
1638 ipv4_mask->hdr.hdr_checksum) {
1639 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1640 rte_flow_error_set(error, EINVAL,
1641 RTE_FLOW_ERROR_TYPE_ITEM,
1642 item, "Not supported by fdir filter");
1645 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1646 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1649 rule->b_spec = TRUE;
1651 (const struct rte_flow_item_ipv4 *)item->spec;
1652 rule->ixgbe_fdir.formatted.dst_ip[0] =
1653 ipv4_spec->hdr.dst_addr;
1654 rule->ixgbe_fdir.formatted.src_ip[0] =
1655 ipv4_spec->hdr.src_addr;
1659 * Check if the next not void item is
1660 * TCP or UDP or SCTP or END.
1662 item = next_no_fuzzy_pattern(pattern, item);
1663 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1664 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1665 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1666 item->type != RTE_FLOW_ITEM_TYPE_END &&
1667 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1668 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1669 rte_flow_error_set(error, EINVAL,
1670 RTE_FLOW_ERROR_TYPE_ITEM,
1671 item, "Not supported by fdir filter");
1676 /* Get the IPV6 info. */
1677 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1679 * Set the flow type even if there's no content
1680 * as we must have a flow type.
1682 rule->ixgbe_fdir.formatted.flow_type =
1683 IXGBE_ATR_FLOW_TYPE_IPV6;
1686 * 1. must signature match
1687 * 2. not support last
1688 * 3. mask must not be null
1690 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1693 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1694 rte_flow_error_set(error, EINVAL,
1695 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1696 item, "Not supported last point for range");
1700 rule->b_mask = TRUE;
1702 (const struct rte_flow_item_ipv6 *)item->mask;
1703 if (ipv6_mask->hdr.vtc_flow ||
1704 ipv6_mask->hdr.payload_len ||
1705 ipv6_mask->hdr.proto ||
1706 ipv6_mask->hdr.hop_limits) {
1707 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1708 rte_flow_error_set(error, EINVAL,
1709 RTE_FLOW_ERROR_TYPE_ITEM,
1710 item, "Not supported by fdir filter");
/* IPv6 address masks are per-byte all-or-nothing; each fully-masked
 * byte sets one bit of the 16-bit src/dst_ipv6_mask bitmaps. */
1714 /* check src addr mask */
1715 for (j = 0; j < 16; j++) {
1716 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1717 rule->mask.src_ipv6_mask |= 1 << j;
1718 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1719 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1720 rte_flow_error_set(error, EINVAL,
1721 RTE_FLOW_ERROR_TYPE_ITEM,
1722 item, "Not supported by fdir filter");
1727 /* check dst addr mask */
1728 for (j = 0; j < 16; j++) {
1729 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1730 rule->mask.dst_ipv6_mask |= 1 << j;
1731 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1732 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1733 rte_flow_error_set(error, EINVAL,
1734 RTE_FLOW_ERROR_TYPE_ITEM,
1735 item, "Not supported by fdir filter");
1741 rule->b_spec = TRUE;
1743 (const struct rte_flow_item_ipv6 *)item->spec;
1744 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1745 ipv6_spec->hdr.src_addr, 16);
1746 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1747 ipv6_spec->hdr.dst_addr, 16);
1751 * Check if the next not void item is
1752 * TCP or UDP or SCTP or END.
1754 item = next_no_fuzzy_pattern(pattern, item);
1755 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1756 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1757 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1758 item->type != RTE_FLOW_ITEM_TYPE_END &&
1759 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1760 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1761 rte_flow_error_set(error, EINVAL,
1762 RTE_FLOW_ERROR_TYPE_ITEM,
1763 item, "Not supported by fdir filter");
1768 /* Get the TCP info. */
1769 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1771 * Set the flow type even if there's no content
1772 * as we must have a flow type.
1774 rule->ixgbe_fdir.formatted.flow_type |=
1775 IXGBE_ATR_L4TYPE_TCP;
1776 /*Not supported last point for range*/
1778 rte_flow_error_set(error, EINVAL,
1779 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1780 item, "Not supported last point for range");
1784 * Only care about src & dst ports,
1785 * others should be masked.
1788 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1789 rte_flow_error_set(error, EINVAL,
1790 RTE_FLOW_ERROR_TYPE_ITEM,
1791 item, "Not supported by fdir filter");
1794 rule->b_mask = TRUE;
1795 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
1796 if (tcp_mask->hdr.sent_seq ||
1797 tcp_mask->hdr.recv_ack ||
1798 tcp_mask->hdr.data_off ||
1799 tcp_mask->hdr.tcp_flags ||
1800 tcp_mask->hdr.rx_win ||
1801 tcp_mask->hdr.cksum ||
1802 tcp_mask->hdr.tcp_urp) {
1803 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1804 rte_flow_error_set(error, EINVAL,
1805 RTE_FLOW_ERROR_TYPE_ITEM,
1806 item, "Not supported by fdir filter");
1809 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1810 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1813 rule->b_spec = TRUE;
1814 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1815 rule->ixgbe_fdir.formatted.src_port =
1816 tcp_spec->hdr.src_port;
1817 rule->ixgbe_fdir.formatted.dst_port =
1818 tcp_spec->hdr.dst_port;
/* After TCP only RAW (flex bytes) or END may follow. */
1821 item = next_no_fuzzy_pattern(pattern, item);
1822 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1823 item->type != RTE_FLOW_ITEM_TYPE_END) {
1824 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1825 rte_flow_error_set(error, EINVAL,
1826 RTE_FLOW_ERROR_TYPE_ITEM,
1827 item, "Not supported by fdir filter");
1833 /* Get the UDP info */
1834 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1836 * Set the flow type even if there's no content
1837 * as we must have a flow type.
1839 rule->ixgbe_fdir.formatted.flow_type |=
1840 IXGBE_ATR_L4TYPE_UDP;
1841 /*Not supported last point for range*/
1843 rte_flow_error_set(error, EINVAL,
1844 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1845 item, "Not supported last point for range");
1849 * Only care about src & dst ports,
1850 * others should be masked.
1853 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1854 rte_flow_error_set(error, EINVAL,
1855 RTE_FLOW_ERROR_TYPE_ITEM,
1856 item, "Not supported by fdir filter");
1859 rule->b_mask = TRUE;
1860 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1861 if (udp_mask->hdr.dgram_len ||
1862 udp_mask->hdr.dgram_cksum) {
1863 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1864 rte_flow_error_set(error, EINVAL,
1865 RTE_FLOW_ERROR_TYPE_ITEM,
1866 item, "Not supported by fdir filter");
1869 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1870 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1873 rule->b_spec = TRUE;
1874 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1875 rule->ixgbe_fdir.formatted.src_port =
1876 udp_spec->hdr.src_port;
1877 rule->ixgbe_fdir.formatted.dst_port =
1878 udp_spec->hdr.dst_port;
/* After UDP only RAW (flex bytes) or END may follow. */
1881 item = next_no_fuzzy_pattern(pattern, item);
1882 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1883 item->type != RTE_FLOW_ITEM_TYPE_END) {
1884 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1885 rte_flow_error_set(error, EINVAL,
1886 RTE_FLOW_ERROR_TYPE_ITEM,
1887 item, "Not supported by fdir filter");
1893 /* Get the SCTP info */
1894 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1896 * Set the flow type even if there's no content
1897 * as we must have a flow type.
1899 rule->ixgbe_fdir.formatted.flow_type |=
1900 IXGBE_ATR_L4TYPE_SCTP;
1901 /*Not supported last point for range*/
1903 rte_flow_error_set(error, EINVAL,
1904 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1905 item, "Not supported last point for range");
1910 rule->b_mask = TRUE;
1912 (const struct rte_flow_item_sctp *)item->mask;
1913 if (sctp_mask->hdr.tag ||
1914 sctp_mask->hdr.cksum) {
1915 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1916 rte_flow_error_set(error, EINVAL,
1917 RTE_FLOW_ERROR_TYPE_ITEM,
1918 item, "Not supported by fdir filter");
1921 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1922 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1926 rule->b_spec = TRUE;
1928 (const struct rte_flow_item_sctp *)item->spec;
1929 rule->ixgbe_fdir.formatted.src_port =
1930 sctp_spec->hdr.src_port;
1931 rule->ixgbe_fdir.formatted.dst_port =
1932 sctp_spec->hdr.dst_port;
/* After SCTP only RAW (flex bytes) or END may follow. */
1935 item = next_no_fuzzy_pattern(pattern, item);
1936 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1937 item->type != RTE_FLOW_ITEM_TYPE_END) {
1938 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1939 rte_flow_error_set(error, EINVAL,
1940 RTE_FLOW_ERROR_TYPE_ITEM,
1941 item, "Not supported by fdir filter");
1946 /* Get the flex byte info */
1947 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1948 /* Not supported last point for range*/
1950 rte_flow_error_set(error, EINVAL,
1951 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1952 item, "Not supported last point for range");
1955 /* mask should not be null */
1956 if (!item->mask || !item->spec) {
1957 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1958 rte_flow_error_set(error, EINVAL,
1959 RTE_FLOW_ERROR_TYPE_ITEM,
1960 item, "Not supported by fdir filter");
1964 raw_mask = (const struct rte_flow_item_raw *)item->mask;
/* RAW mask must match every checked spec field exactly. */
1967 if (raw_mask->relative != 0x1 ||
1968 raw_mask->search != 0x1 ||
1969 raw_mask->reserved != 0x0 ||
1970 (uint32_t)raw_mask->offset != 0xffffffff ||
1971 raw_mask->limit != 0xffff ||
1972 raw_mask->length != 0xffff) {
1973 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974 rte_flow_error_set(error, EINVAL,
1975 RTE_FLOW_ERROR_TYPE_ITEM,
1976 item, "Not supported by fdir filter");
1980 raw_spec = (const struct rte_flow_item_raw *)item->spec;
/* RAW spec: absolute, non-searching, 2-byte pattern at an even
 * offset no greater than IXGBE_MAX_FLX_SOURCE_OFF. */
1983 if (raw_spec->relative != 0 ||
1984 raw_spec->search != 0 ||
1985 raw_spec->reserved != 0 ||
1986 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
1987 raw_spec->offset % 2 ||
1988 raw_spec->limit != 0 ||
1989 raw_spec->length != 2 ||
1990 /* pattern can't be 0xffff */
1991 (raw_spec->pattern[0] == 0xff &&
1992 raw_spec->pattern[1] == 0xff)) {
1993 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1994 rte_flow_error_set(error, EINVAL,
1995 RTE_FLOW_ERROR_TYPE_ITEM,
1996 item, "Not supported by fdir filter");
2000 /* check pattern mask */
2001 if (raw_mask->pattern[0] != 0xff ||
2002 raw_mask->pattern[1] != 0xff) {
2003 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2004 rte_flow_error_set(error, EINVAL,
2005 RTE_FLOW_ERROR_TYPE_ITEM,
2006 item, "Not supported by fdir filter");
/* Store the two flex bytes (pattern[1] is the high byte) and offset. */
2010 rule->mask.flex_bytes_mask = 0xffff;
2011 rule->ixgbe_fdir.formatted.flex_bytes =
2012 (((uint16_t)raw_spec->pattern[1]) << 8) |
2013 raw_spec->pattern[0];
2014 rule->flex_bytes_offset = raw_spec->offset;
2017 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2018 /* check if the next not void item is END */
2019 item = next_no_fuzzy_pattern(pattern, item);
2020 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2021 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2022 rte_flow_error_set(error, EINVAL,
2023 RTE_FLOW_ERROR_TYPE_ITEM,
2024 item, "Not supported by fdir filter");
/* Pattern parsed; finish with the attr/action checks. */
2029 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
/* GRE protocol (ethertype) value carried by NVGRE packets — 0x6558 is
 * Transparent Ethernet Bridging (see RFC 7637); used to validate the
 * NVGRE item spec in the tunnel fdir parser below. */
2032 #define NVGRE_PROTOCOL 0x6558
2035 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2036 * And get the flow director filter info BTW.
2038 * The first not void item must be ETH.
2039 * The second not void item must be IPV4/ IPV6.
2040 * The third not void item must be NVGRE.
2041 * The next not void item must be END.
2043 * The first not void item must be ETH.
2044 * The second not void item must be IPV4/ IPV6.
2045 * The third not void item must be NVGRE.
2046 * The next not void item must be END.
2048 * The first not void action should be QUEUE or DROP.
2049 * The second not void optional action should be MARK,
2050 * mark_id is a uint32_t number.
2051 * The next not void action should be END.
2052 * VxLAN pattern example:
2055 * IPV4/IPV6 NULL NULL
2057 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2058 * MAC VLAN tci 0x2016 0xEFFF
2060 * NVGRE pattern example:
2063 * IPV4/IPV6 NULL NULL
2064 * NVGRE protocol 0x6558 0xFFFF
2065 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2066 * MAC VLAN tci 0x2016 0xEFFF
2068 * other members in mask and spec should set to 0x00.
2069 * item->last should be NULL.
2072 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2073 const struct rte_flow_item pattern[],
2074 const struct rte_flow_action actions[],
2075 struct ixgbe_fdir_rule *rule,
2076 struct rte_flow_error *error)
2078 const struct rte_flow_item *item;
2079 const struct rte_flow_item_vxlan *vxlan_spec;
2080 const struct rte_flow_item_vxlan *vxlan_mask;
2081 const struct rte_flow_item_nvgre *nvgre_spec;
2082 const struct rte_flow_item_nvgre *nvgre_mask;
2083 const struct rte_flow_item_eth *eth_spec;
2084 const struct rte_flow_item_eth *eth_mask;
2085 const struct rte_flow_item_vlan *vlan_spec;
2086 const struct rte_flow_item_vlan *vlan_mask;
2090 rte_flow_error_set(error, EINVAL,
2091 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2092 NULL, "NULL pattern.");
2097 rte_flow_error_set(error, EINVAL,
2098 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2099 NULL, "NULL action.");
2104 rte_flow_error_set(error, EINVAL,
2105 RTE_FLOW_ERROR_TYPE_ATTR,
2106 NULL, "NULL attribute.");
2111 * Some fields may not be provided. Set spec to 0 and mask to default
2112 * value. So, we need not do anything for the not provided fields later.
2114 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2115 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2116 rule->mask.vlan_tci_mask = 0;
2119 * The first not void item should be
2120 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2122 item = next_no_void_pattern(pattern, NULL);
2123 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2124 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2125 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2126 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2127 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2128 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2129 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2130 rte_flow_error_set(error, EINVAL,
2131 RTE_FLOW_ERROR_TYPE_ITEM,
2132 item, "Not supported by fdir filter");
2136 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2139 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2140 /* Only used to describe the protocol stack. */
2141 if (item->spec || item->mask) {
2142 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2143 rte_flow_error_set(error, EINVAL,
2144 RTE_FLOW_ERROR_TYPE_ITEM,
2145 item, "Not supported by fdir filter");
2148 /* Not supported last point for range*/
2150 rte_flow_error_set(error, EINVAL,
2151 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2152 item, "Not supported last point for range");
2156 /* Check if the next not void item is IPv4 or IPv6. */
2157 item = next_no_void_pattern(pattern, item);
2158 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2159 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2160 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2161 rte_flow_error_set(error, EINVAL,
2162 RTE_FLOW_ERROR_TYPE_ITEM,
2163 item, "Not supported by fdir filter");
2169 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2170 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2171 /* Only used to describe the protocol stack. */
2172 if (item->spec || item->mask) {
2173 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2174 rte_flow_error_set(error, EINVAL,
2175 RTE_FLOW_ERROR_TYPE_ITEM,
2176 item, "Not supported by fdir filter");
2179 /*Not supported last point for range*/
2181 rte_flow_error_set(error, EINVAL,
2182 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2183 item, "Not supported last point for range");
2187 /* Check if the next not void item is UDP or NVGRE. */
2188 item = next_no_void_pattern(pattern, item);
2189 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2190 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2191 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2192 rte_flow_error_set(error, EINVAL,
2193 RTE_FLOW_ERROR_TYPE_ITEM,
2194 item, "Not supported by fdir filter");
2200 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2201 /* Only used to describe the protocol stack. */
2202 if (item->spec || item->mask) {
2203 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204 rte_flow_error_set(error, EINVAL,
2205 RTE_FLOW_ERROR_TYPE_ITEM,
2206 item, "Not supported by fdir filter");
2209 /*Not supported last point for range*/
2211 rte_flow_error_set(error, EINVAL,
2212 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2213 item, "Not supported last point for range");
2217 /* Check if the next not void item is VxLAN. */
2218 item = next_no_void_pattern(pattern, item);
2219 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2220 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2221 rte_flow_error_set(error, EINVAL,
2222 RTE_FLOW_ERROR_TYPE_ITEM,
2223 item, "Not supported by fdir filter");
2228 /* Get the VxLAN info */
2229 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2230 rule->ixgbe_fdir.formatted.tunnel_type =
2231 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2233 /* Only care about VNI, others should be masked. */
2235 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2236 rte_flow_error_set(error, EINVAL,
2237 RTE_FLOW_ERROR_TYPE_ITEM,
2238 item, "Not supported by fdir filter");
2241 /*Not supported last point for range*/
2243 rte_flow_error_set(error, EINVAL,
2244 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2245 item, "Not supported last point for range");
2248 rule->b_mask = TRUE;
2250 /* Tunnel type is always meaningful. */
2251 rule->mask.tunnel_type_mask = 1;
2254 (const struct rte_flow_item_vxlan *)item->mask;
2255 if (vxlan_mask->flags) {
2256 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257 rte_flow_error_set(error, EINVAL,
2258 RTE_FLOW_ERROR_TYPE_ITEM,
2259 item, "Not supported by fdir filter");
2262 /* VNI must be totally masked or not. */
2263 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2264 vxlan_mask->vni[2]) &&
2265 ((vxlan_mask->vni[0] != 0xFF) ||
2266 (vxlan_mask->vni[1] != 0xFF) ||
2267 (vxlan_mask->vni[2] != 0xFF))) {
2268 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2269 rte_flow_error_set(error, EINVAL,
2270 RTE_FLOW_ERROR_TYPE_ITEM,
2271 item, "Not supported by fdir filter");
2275 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2276 RTE_DIM(vxlan_mask->vni));
2279 rule->b_spec = TRUE;
2280 vxlan_spec = (const struct rte_flow_item_vxlan *)
2282 rte_memcpy(((uint8_t *)
2283 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2284 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2285 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2286 rule->ixgbe_fdir.formatted.tni_vni);
2290 /* Get the NVGRE info */
2291 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2292 rule->ixgbe_fdir.formatted.tunnel_type =
2293 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2296 * Only care about flags0, flags1, protocol and TNI,
2297 * others should be masked.
2300 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2301 rte_flow_error_set(error, EINVAL,
2302 RTE_FLOW_ERROR_TYPE_ITEM,
2303 item, "Not supported by fdir filter");
2306 /*Not supported last point for range*/
2308 rte_flow_error_set(error, EINVAL,
2309 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2310 item, "Not supported last point for range");
2313 rule->b_mask = TRUE;
2315 /* Tunnel type is always meaningful. */
2316 rule->mask.tunnel_type_mask = 1;
2319 (const struct rte_flow_item_nvgre *)item->mask;
2320 if (nvgre_mask->flow_id) {
2321 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2322 rte_flow_error_set(error, EINVAL,
2323 RTE_FLOW_ERROR_TYPE_ITEM,
2324 item, "Not supported by fdir filter");
2327 if (nvgre_mask->c_k_s_rsvd0_ver !=
2328 rte_cpu_to_be_16(0x3000) ||
2329 nvgre_mask->protocol != 0xFFFF) {
2330 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2331 rte_flow_error_set(error, EINVAL,
2332 RTE_FLOW_ERROR_TYPE_ITEM,
2333 item, "Not supported by fdir filter");
2336 /* TNI must be totally masked or not. */
2337 if (nvgre_mask->tni[0] &&
2338 ((nvgre_mask->tni[0] != 0xFF) ||
2339 (nvgre_mask->tni[1] != 0xFF) ||
2340 (nvgre_mask->tni[2] != 0xFF))) {
2341 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2342 rte_flow_error_set(error, EINVAL,
2343 RTE_FLOW_ERROR_TYPE_ITEM,
2344 item, "Not supported by fdir filter");
2347 /* tni is a 24-bits bit field */
2348 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2349 RTE_DIM(nvgre_mask->tni));
2350 rule->mask.tunnel_id_mask <<= 8;
2353 rule->b_spec = TRUE;
2355 (const struct rte_flow_item_nvgre *)item->spec;
2356 if (nvgre_spec->c_k_s_rsvd0_ver !=
2357 rte_cpu_to_be_16(0x2000) ||
2358 nvgre_spec->protocol !=
2359 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2360 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2361 rte_flow_error_set(error, EINVAL,
2362 RTE_FLOW_ERROR_TYPE_ITEM,
2363 item, "Not supported by fdir filter");
2366 /* tni is a 24-bits bit field */
2367 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2368 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2369 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2373 /* check if the next not void item is MAC */
2374 item = next_no_void_pattern(pattern, item);
2375 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2376 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2377 rte_flow_error_set(error, EINVAL,
2378 RTE_FLOW_ERROR_TYPE_ITEM,
2379 item, "Not supported by fdir filter");
2384 * Only support vlan and dst MAC address,
2385 * others should be masked.
2389 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2390 rte_flow_error_set(error, EINVAL,
2391 RTE_FLOW_ERROR_TYPE_ITEM,
2392 item, "Not supported by fdir filter");
2395 /*Not supported last point for range*/
2397 rte_flow_error_set(error, EINVAL,
2398 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2399 item, "Not supported last point for range");
2402 rule->b_mask = TRUE;
2403 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2405 /* Ether type should be masked. */
2406 if (eth_mask->type) {
2407 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2408 rte_flow_error_set(error, EINVAL,
2409 RTE_FLOW_ERROR_TYPE_ITEM,
2410 item, "Not supported by fdir filter");
2414 /* src MAC address should be masked. */
2415 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2416 if (eth_mask->src.addr_bytes[j]) {
2418 sizeof(struct ixgbe_fdir_rule));
2419 rte_flow_error_set(error, EINVAL,
2420 RTE_FLOW_ERROR_TYPE_ITEM,
2421 item, "Not supported by fdir filter");
2425 rule->mask.mac_addr_byte_mask = 0;
2426 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2427 /* It's a per byte mask. */
2428 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2429 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2430 } else if (eth_mask->dst.addr_bytes[j]) {
2431 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2432 rte_flow_error_set(error, EINVAL,
2433 RTE_FLOW_ERROR_TYPE_ITEM,
2434 item, "Not supported by fdir filter");
2439 /* When no vlan, considered as full mask. */
2440 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2443 rule->b_spec = TRUE;
2444 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2446 /* Get the dst MAC. */
2447 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2448 rule->ixgbe_fdir.formatted.inner_mac[j] =
2449 eth_spec->dst.addr_bytes[j];
2454 * Check if the next not void item is vlan or ipv4.
2455 * IPv6 is not supported.
2457 item = next_no_void_pattern(pattern, item);
2458 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2459 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2460 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2461 rte_flow_error_set(error, EINVAL,
2462 RTE_FLOW_ERROR_TYPE_ITEM,
2463 item, "Not supported by fdir filter");
2466 /*Not supported last point for range*/
2468 rte_flow_error_set(error, EINVAL,
2469 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2470 item, "Not supported last point for range");
2474 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2475 if (!(item->spec && item->mask)) {
2476 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2477 rte_flow_error_set(error, EINVAL,
2478 RTE_FLOW_ERROR_TYPE_ITEM,
2479 item, "Not supported by fdir filter");
2483 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2484 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2486 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2488 rule->mask.vlan_tci_mask = vlan_mask->tci;
2489 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2490 /* More than one tag is not supported. */
2492 /* check if the next not void item is END */
2493 item = next_no_void_pattern(pattern, item);
2495 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2496 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2497 rte_flow_error_set(error, EINVAL,
2498 RTE_FLOW_ERROR_TYPE_ITEM,
2499 item, "Not supported by fdir filter");
2505 * If the tag is 0, it means we don't care about the VLAN.
2509 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2513 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2514 const struct rte_flow_attr *attr,
2515 const struct rte_flow_item pattern[],
2516 const struct rte_flow_action actions[],
2517 struct ixgbe_fdir_rule *rule,
2518 struct rte_flow_error *error)
2521 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2522 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2524 if (hw->mac.type != ixgbe_mac_82599EB &&
2525 hw->mac.type != ixgbe_mac_X540 &&
2526 hw->mac.type != ixgbe_mac_X550 &&
2527 hw->mac.type != ixgbe_mac_X550EM_x &&
2528 hw->mac.type != ixgbe_mac_X550EM_a)
2531 ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2532 actions, rule, error);
2537 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2538 actions, rule, error);
2545 if (hw->mac.type == ixgbe_mac_82599EB &&
2546 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2547 (rule->mask.src_port_mask != 0 ||
2548 rule->mask.dst_port_mask != 0))
2551 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2552 fdir_mode != rule->mode)
/*
 * Free every element on all of the driver's software filter lists
 * (ntuple, ethertype, SYN, L2 tunnel, fdir) and the flow-memory list,
 * including each stored rte_flow object.  Pure bookkeeping cleanup —
 * no hardware registers are touched in this function.
 *
 * NOTE(review): some TAILQ_REMOVE argument lines are elided in this
 * listing; each loop follows the standard "pop head, remove, free"
 * pattern.
 */
2558 ixgbe_filterlist_flush(void)
2560 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2561 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2562 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2563 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2564 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2565 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Drain the ntuple filter list. */
2567 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2568 TAILQ_REMOVE(&filter_ntuple_list,
2571 rte_free(ntuple_filter_ptr);
/* Drain the ethertype filter list. */
2574 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2575 TAILQ_REMOVE(&filter_ethertype_list,
2576 ethertype_filter_ptr,
2578 rte_free(ethertype_filter_ptr);
/* Drain the TCP SYN filter list. */
2581 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2582 TAILQ_REMOVE(&filter_syn_list,
2585 rte_free(syn_filter_ptr);
/* Drain the L2 tunnel filter list. */
2588 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2589 TAILQ_REMOVE(&filter_l2_tunnel_list,
2592 rte_free(l2_tn_filter_ptr);
/* Drain the flow-director rule list. */
2595 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2596 TAILQ_REMOVE(&filter_fdir_list,
2599 rte_free(fdir_rule_ptr);
/* Finally free every rte_flow handle tracked by the flow-mem list. */
2602 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2603 TAILQ_REMOVE(&ixgbe_flow_list,
2606 rte_free(ixgbe_flow_mem_ptr->flow);
2607 rte_free(ixgbe_flow_mem_ptr);
2612 * Create or destroy a flow rule.
2613 * Theoretically one rule can match more than one filter.
2614 * We will let it use the filter which it hits first.
2615 * So, the sequence matters.
/*
 * Create a flow rule on the port (rte_flow .create callback).
 *
 * The rule is parsed against each supported filter type in priority
 * order — ntuple, ethertype, SYN, flow-director, L2 tunnel — and the
 * first parser that accepts the pattern decides which hardware filter
 * is programmed.  On success the filter is copied onto the matching
 * per-type list, recorded in the returned struct rte_flow handle, and
 * the handle is tracked on ixgbe_flow_list.  On failure the handle is
 * unlinked, *error is set, and (per the elided tail) NULL is returned.
 *
 * NOTE(review): this listing elides many lines of the original file
 * (error checks after each allocation/parse, "goto out" labels, and
 * the final returns are not visible).  Also, several "&eth..."
 * sequences were mis-encoded as the character 'ð' (HTML entity
 * &eth;), e.g. "ðertype_filter" should read "&ethertype_filter" —
 * confirm against the full source.
 */
2617 static struct rte_flow *
2618 ixgbe_flow_create(struct rte_eth_dev *dev,
2619 const struct rte_flow_attr *attr,
2620 const struct rte_flow_item pattern[],
2621 const struct rte_flow_action actions[],
2622 struct rte_flow_error *error)
2625 struct rte_eth_ntuple_filter ntuple_filter;
2626 struct rte_eth_ethertype_filter ethertype_filter;
2627 struct rte_eth_syn_filter syn_filter;
2628 struct ixgbe_fdir_rule fdir_rule;
2629 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2630 struct ixgbe_hw_fdir_info *fdir_info =
2631 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2632 struct rte_flow *flow = NULL;
2633 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2634 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2635 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2636 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2637 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2638 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Allocate the flow handle; returned NULL on allocation failure. */
2640 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2642 PMD_DRV_LOG(ERR, "failed to allocate memory");
2643 return (struct rte_flow *)flow;
/* Track the handle so flush/destroy can find and free it later. */
2645 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2646 sizeof(struct ixgbe_flow_mem), 0);
2647 if (!ixgbe_flow_mem_ptr) {
2648 PMD_DRV_LOG(ERR, "failed to allocate memory");
2652 ixgbe_flow_mem_ptr->flow = flow;
2653 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2654 ixgbe_flow_mem_ptr, entries);
/* 1) Try the ntuple (5-tuple) filter. */
2656 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2657 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2658 actions, &ntuple_filter, error);
2660 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2662 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2663 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2664 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2666 sizeof(struct rte_eth_ntuple_filter));
2667 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2668 ntuple_filter_ptr, entries);
2669 flow->rule = ntuple_filter_ptr;
2670 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
/* 2) Try the ethertype filter ('ð' = mis-encoded "&eth", see note). */
2676 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2677 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2678 actions, ðertype_filter, error);
2680 ret = ixgbe_add_del_ethertype_filter(dev,
2681 ðertype_filter, TRUE);
2683 ethertype_filter_ptr = rte_zmalloc(
2684 "ixgbe_ethertype_filter",
2685 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2686 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2688 sizeof(struct rte_eth_ethertype_filter));
2689 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2690 ethertype_filter_ptr, entries);
2691 flow->rule = ethertype_filter_ptr;
2692 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
/* 3) Try the TCP SYN filter. */
2698 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2699 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2700 actions, &syn_filter, error);
2702 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2704 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2705 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2706 (void)rte_memcpy(&syn_filter_ptr->filter_info,
2708 sizeof(struct rte_eth_syn_filter));
2709 TAILQ_INSERT_TAIL(&filter_syn_list,
2712 flow->rule = syn_filter_ptr;
2713 flow->filter_type = RTE_ETH_FILTER_SYN;
/* 4) Try the flow-director filter. */
2719 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2720 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2721 actions, &fdir_rule, error);
2723 /* A mask cannot be deleted. */
2724 if (fdir_rule.b_mask) {
2725 if (!fdir_info->mask_added) {
2726 /* It's the first time the mask is set. */
2727 rte_memcpy(&fdir_info->mask,
2729 sizeof(struct ixgbe_hw_fdir_mask));
2730 fdir_info->flex_bytes_offset =
2731 fdir_rule.flex_bytes_offset;
2733 if (fdir_rule.mask.flex_bytes_mask)
2734 ixgbe_fdir_set_flexbytes_offset(dev,
2735 fdir_rule.flex_bytes_offset);
/* Program the global input mask into hardware. */
2737 ret = ixgbe_fdir_set_input_mask(dev);
2741 fdir_info->mask_added = TRUE;
/*
 * Only one global mask is supported: any subsequent rule's
 * mask must be identical to the one already programmed.
 */
2744 * Only support one global mask,
2745 * all the masks should be the same.
2747 ret = memcmp(&fdir_info->mask,
2749 sizeof(struct ixgbe_hw_fdir_mask));
2753 if (fdir_info->flex_bytes_offset !=
2754 fdir_rule.flex_bytes_offset)
/* Only program a concrete rule when a spec was supplied. */
2759 if (fdir_rule.b_spec) {
2760 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2763 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2764 sizeof(struct ixgbe_fdir_rule_ele), 0);
2765 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2767 sizeof(struct ixgbe_fdir_rule));
2768 TAILQ_INSERT_TAIL(&filter_fdir_list,
2769 fdir_rule_ptr, entries);
2770 flow->rule = fdir_rule_ptr;
2771 flow->filter_type = RTE_ETH_FILTER_FDIR;
/* 5) Try the L2 tunnel (e.g. E-Tag) filter. */
2783 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2784 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2785 actions, &l2_tn_filter, error);
2787 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2789 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2790 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2791 (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2793 sizeof(struct rte_eth_l2_tunnel_conf));
2794 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2795 l2_tn_filter_ptr, entries);
2796 flow->rule = l2_tn_filter_ptr;
2797 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
/* Error path: unlink the tracking entry and report the failure. */
2803 TAILQ_REMOVE(&ixgbe_flow_list,
2804 ixgbe_flow_mem_ptr, entries);
2805 rte_flow_error_set(error, -ret,
2806 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2807 "Failed to create flow.");
2808 rte_free(ixgbe_flow_mem_ptr);
2814 * Check if the flow rule is supported by ixgbe.
2815 * It only checks the format. It doesn't guarantee the rule can be programmed into
2816 * the HW, because there may not be enough room for the rule.
/*
 * Validate a flow rule (rte_flow .validate callback).
 * Runs the same five parsers as ixgbe_flow_create(), in the same
 * priority order, but programs nothing into hardware — it only checks
 * that at least one filter type accepts the pattern/actions.
 * Returns 0 if some parser accepts the rule (per the elided returns),
 * otherwise the last parser's error code with *error set.
 *
 * NOTE(review): the result checks between parse calls are elided in
 * this listing, and "&eth..." appears mis-encoded as 'ð' (HTML
 * entity &eth;) on the ethertype lines — confirm against full source.
 */
2819 ixgbe_flow_validate(struct rte_eth_dev *dev,
2820 const struct rte_flow_attr *attr,
2821 const struct rte_flow_item pattern[],
2822 const struct rte_flow_action actions[],
2823 struct rte_flow_error *error)
2825 struct rte_eth_ntuple_filter ntuple_filter;
2826 struct rte_eth_ethertype_filter ethertype_filter;
2827 struct rte_eth_syn_filter syn_filter;
2828 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2829 struct ixgbe_fdir_rule fdir_rule;
/* 1) ntuple */
2832 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2833 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2834 actions, &ntuple_filter, error);
/* 2) ethertype */
2838 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2839 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2840 actions, ðertype_filter, error);
/* 3) TCP SYN */
2844 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2845 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2846 actions, &syn_filter, error);
/* 4) flow director */
2850 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2851 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2852 actions, &fdir_rule, error);
/* 5) L2 tunnel */
2856 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2857 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2858 actions, &l2_tn_filter, error);
2863 /* Destroy a flow rule on ixgbe. */
/*
 * Destroy a single flow rule (rte_flow .destroy callback).
 * Dispatches on the flow's recorded filter_type: removes the filter
 * from hardware (via the per-type delete/program call), unlinks its
 * element from the per-type software list, and finally unlinks and
 * frees the flow's tracking entry on ixgbe_flow_list.
 * On a hardware-removal failure, *error is set and (per the elided
 * lines) a negative value is returned without freeing the handle.
 *
 * NOTE(review): several lines are elided (pmd_flow->rule casts,
 * break statements, returns), and "&eth..." appears mis-encoded as
 * 'ð' on the ethertype lines — confirm against the full source.
 */
2865 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2866 struct rte_flow *flow,
2867 struct rte_flow_error *error)
2870 struct rte_flow *pmd_flow = flow;
2871 enum rte_filter_type filter_type = pmd_flow->filter_type;
2872 struct rte_eth_ntuple_filter ntuple_filter;
2873 struct rte_eth_ethertype_filter ethertype_filter;
2874 struct rte_eth_syn_filter syn_filter;
2875 struct ixgbe_fdir_rule fdir_rule;
2876 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2877 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2878 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2879 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2880 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2881 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2882 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2883 struct ixgbe_hw_fdir_info *fdir_info =
2884 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2886 switch (filter_type) {
2887 case RTE_ETH_FILTER_NTUPLE:
2888 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
/* Copy stored filter info out, then delete from HW (FALSE = del). */
2890 (void)rte_memcpy(&ntuple_filter,
2891 &ntuple_filter_ptr->filter_info,
2892 sizeof(struct rte_eth_ntuple_filter));
2893 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2895 TAILQ_REMOVE(&filter_ntuple_list,
2896 ntuple_filter_ptr, entries);
2897 rte_free(ntuple_filter_ptr);
2900 case RTE_ETH_FILTER_ETHERTYPE:
2901 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2903 (void)rte_memcpy(ðertype_filter,
2904 ðertype_filter_ptr->filter_info,
2905 sizeof(struct rte_eth_ethertype_filter));
2906 ret = ixgbe_add_del_ethertype_filter(dev,
2907 ðertype_filter, FALSE);
2909 TAILQ_REMOVE(&filter_ethertype_list,
2910 ethertype_filter_ptr, entries);
2911 rte_free(ethertype_filter_ptr);
2914 case RTE_ETH_FILTER_SYN:
2915 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2917 (void)rte_memcpy(&syn_filter,
2918 &syn_filter_ptr->filter_info,
2919 sizeof(struct rte_eth_syn_filter));
2920 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2922 TAILQ_REMOVE(&filter_syn_list,
2923 syn_filter_ptr, entries);
2924 rte_free(syn_filter_ptr);
2927 case RTE_ETH_FILTER_FDIR:
2928 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2929 (void)rte_memcpy(&fdir_rule,
2930 &fdir_rule_ptr->filter_info,
2931 sizeof(struct ixgbe_fdir_rule));
/* TRUE, FALSE = delete the programmed fdir rule. */
2932 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2934 TAILQ_REMOVE(&filter_fdir_list,
2935 fdir_rule_ptr, entries);
2936 rte_free(fdir_rule_ptr);
/* Last fdir rule gone: allow a new global mask to be set. */
2937 if (TAILQ_EMPTY(&filter_fdir_list))
2938 fdir_info->mask_added = false;
2941 case RTE_ETH_FILTER_L2_TUNNEL:
2942 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2944 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2945 sizeof(struct rte_eth_l2_tunnel_conf));
2946 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2948 TAILQ_REMOVE(&filter_l2_tunnel_list,
2949 l2_tn_filter_ptr, entries);
2950 rte_free(l2_tn_filter_ptr);
/* default: unknown filter type recorded on the flow. */
2954 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
/* Hardware removal failed: report and bail (elided return). */
2961 rte_flow_error_set(error, EINVAL,
2962 RTE_FLOW_ERROR_TYPE_HANDLE,
2963 NULL, "Failed to destroy flow");
/* Unlink and free the tracking entry for this flow handle. */
2967 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2968 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2969 TAILQ_REMOVE(&ixgbe_flow_list,
2970 ixgbe_flow_mem_ptr, entries);
2971 rte_free(ixgbe_flow_mem_ptr);
2979 /* Destroy all flow rules associated with a port on ixgbe. */
/*
 * Destroy all flow rules on the port (rte_flow .flush callback).
 * Clears each hardware filter category, then empties every software
 * bookkeeping list via ixgbe_filterlist_flush().  If clearing the
 * fdir or L2 tunnel filters fails, *error is set and (per the elided
 * lines) the error is propagated before the lists are flushed.
 */
2981 ixgbe_flow_flush(struct rte_eth_dev *dev,
2982 struct rte_flow_error *error)
/* These three clear calls have no failure path to report. */
2986 ixgbe_clear_all_ntuple_filter(dev);
2987 ixgbe_clear_all_ethertype_filter(dev);
2988 ixgbe_clear_syn_filter(dev);
2990 ret = ixgbe_clear_all_fdir_filter(dev);
2992 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
2993 NULL, "Failed to flush rule");
2997 ret = ixgbe_clear_all_l2_tn_filter(dev);
2999 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3000 NULL, "Failed to flush rule");
/* Hardware is clean: drop all software-side filter records. */
3004 ixgbe_filterlist_flush();
/*
 * rte_flow operations table exported by the ixgbe PMD; hooked up by
 * the ethdev filter-ctrl path so the generic rte_flow API dispatches
 * to the functions defined above.  (Closing brace elided in this
 * listing.)
 */
3009 const struct rte_flow_ops ixgbe_flow_ops = {
3010 .validate = ixgbe_flow_validate,
3011 .create = ixgbe_flow_create,
3012 .destroy = ixgbe_flow_destroy,
3013 .flush = ixgbe_flow_flush,