/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO	1
#define IXGBE_MAX_N_TUPLE_PRIO	7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/**
 * An endless loop cannot happen here, under these assumptions:
 * 1. there is at least one non-void item (END) in the pattern.
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
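
/*
 * Illustrative sketch (not part of the driver): how the helpers above walk
 * a list. Given the hypothetical pattern below, two calls to
 * next_no_void_pattern() yield the ETH item and then the END item, skipping
 * the VOID entry in between:
 *
 *	const struct rte_flow_item pat[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_item *it = next_no_void_pattern(pat, NULL);
 *	// it->type == RTE_FLOW_ITEM_TYPE_ETH
 *	it = next_no_void_pattern(pat, it);
 *	// it->type == RTE_FLOW_ITEM_TYPE_END
 */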
/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item fields use big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * This is because the pattern is used to describe the packets,
 * and packets normally carry their fields in network order.
 */
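
/*
 * For example (illustrative only): a TCP destination port of 80 must be
 * written into an item spec in network order, while a queue index in an
 * action stays in CPU order:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = rte_cpu_to_be_16(80) },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 */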
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP or SCTP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask  = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto  = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
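
/*
 * Usage sketch (illustrative application-side code, not part of this
 * driver): building the example rule from the doc comment above and
 * validating it through the generic rte_flow API. port_id, the queue
 * index, and the use of the rte_ip.h IPv4() helper are assumptions.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = UINT16_MAX, .dst_port = UINT16_MAX },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	int ret = rte_flow_validate(port_id, &attr, pattern, actions, &err);
 */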
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM ||
	    filter->priority > IXGBE_5TUPLE_MAX_PRI ||
	    filter->priority < IXGBE_5TUPLE_MIN_PRI)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
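
/*
 * Usage sketch (illustrative, application side): an ethertype rule that
 * steers 0x0807 frames to queue 2. port_id and the queue index are
 * assumptions.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr,
 *						pattern, actions, &err);
 */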
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
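
/*
 * Usage sketch (illustrative): a TCP SYN rule per the pattern above.
 * attr.priority 0 selects the low priority, ~0U the high one; the queue
 * index is an assumption. ETH and IPV4 items deliberately carry no
 * spec/mask, as required by the parser.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */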
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is a L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
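
/*
 * Usage sketch (illustrative; the values and the exact bit packing are
 * assumptions based on the checks above): rsvd_grp_ecid_b carries the
 * 2-bit GRP and the 12-bit e_cid_base in network order, so a full match
 * of both uses the 14-bit mask 0x3FFF:
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 */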
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
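
/*
 * Illustrative action list (application side, not part of this driver)
 * accepted by the parser above: QUEUE followed by an optional MARK, then
 * END. The queue index and the mark id are assumptions.
 *
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */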
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
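
/*
 * Illustrative note: an application requests signature mode by placing a
 * FUZZY item anywhere before END; a non-zero masked threshold makes
 * signature_match() return 1. The threshold value here is an assumption:
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xFFFFFFFF };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec, .mask = &fuzzy_mask,
 *	};
 */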
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tags are not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
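
/*
 * Usage sketch (illustrative, hedged): the RAW (flexbyte) item accepted
 * above matches a 2-byte pattern at an even offset. Per the checks in
 * this parser, a valid spec/mask pair would carry:
 *
 *	spec:  relative = 0, search = 0, offset = 12 (even, <= 62),
 *	       limit = 0, length = 2, pattern = { 0x86, 0xDD }
 *	mask:  relative = 1, search = 1, offset = 0xFFFFFFFF,
 *	       limit = 0xFFFF, length = 0xFFFF, pattern = { 0xFF, 0xFF }
 *
 * (In this API revision rte_flow_item_raw ends in a flexible pattern[]
 * array, so each item must be allocated with room for the pattern bytes.)
 */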
#define NVGRE_PROTOCOL 0x6558
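
/*
 * Illustrative VxLAN pattern sketch for the tunnel parser documented
 * below (application side; the VNI value is an assumption). ETH, IPV4
 * and UDP items carry no spec/mask since they only describe the stack:
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */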
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
2080 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2081 const struct rte_flow_item pattern[],
2082 const struct rte_flow_action actions[],
2083 struct ixgbe_fdir_rule *rule,
2084 struct rte_flow_error *error)
2086 const struct rte_flow_item *item;
2087 const struct rte_flow_item_vxlan *vxlan_spec;
2088 const struct rte_flow_item_vxlan *vxlan_mask;
2089 const struct rte_flow_item_nvgre *nvgre_spec;
2090 const struct rte_flow_item_nvgre *nvgre_mask;
2091 const struct rte_flow_item_eth *eth_spec;
2092 const struct rte_flow_item_eth *eth_mask;
2093 const struct rte_flow_item_vlan *vlan_spec;
2094 const struct rte_flow_item_vlan *vlan_mask;
2098 rte_flow_error_set(error, EINVAL,
2099 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2100 NULL, "NULL pattern.");
2105 rte_flow_error_set(error, EINVAL,
2106 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2107 NULL, "NULL action.");
2112 rte_flow_error_set(error, EINVAL,
2113 RTE_FLOW_ERROR_TYPE_ATTR,
2114 NULL, "NULL attribute.");
2119 * Some fields may not be provided. Set spec to 0 and mask to default
2120 * value. So, we need not do anything for the not provided fields later.
2122 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2123 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2124 rule->mask.vlan_tci_mask = 0;
2127 * The first not void item should be
2128 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2130 item = next_no_void_pattern(pattern, NULL);
2131 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2132 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2133 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2134 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2135 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2136 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2137 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2138 rte_flow_error_set(error, EINVAL,
2139 RTE_FLOW_ERROR_TYPE_ITEM,
2140 item, "Not supported by fdir filter");
2144 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
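			/*
			 * Layout note: formatted.tni_vni is 32 bits wide
			 * while the VNI is only 24, so the three VNI bytes
			 * are copied one byte into the field and the whole
			 * word is then converted to CPU order; e.g.
			 * vni = {0x00, 0x32, 0x54} yields tni_vni == 0x3254.
			 */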
		}
	}

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bit field */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;
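		/*
		 * The mask is shifted left by one byte for the same reason
		 * the TNI value is shifted below: both must occupy the same
		 * positions within the 32-bit tunnel id word so that value
		 * and mask stay aligned.
		 */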

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* Check if the next not void item is MAC. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
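	/*
	 * Example: a dst mask of ff:ff:ff:ff:ff:ff sets all six bits and
	 * yields mac_addr_byte_mask == 0x3F; a partially-masked byte such
	 * as 0xF0 is rejected above because the hardware can only match
	 * whole bytes of the inner MAC.
	 */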
	/* When no VLAN item is given, treat the TCI as fully masked. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
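	/*
	 * 0xEFFF keeps the priority (PCP) and VLAN id bits of the TCI and
	 * clears bit 12 (CFI/DEI), which the filter does not match on.
	 */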
	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Check if the next not void item is END. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/**
	 * If the VLAN TCI mask is 0, the VLAN is a don't-care;
	 * nothing needs to be done here.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}

static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);

step_next:
	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;
	return ret;
}

static void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			ixgbe_flow_mem_ptr, entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the filter type which it hits first.
 * So, the sequence matters.
 */
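/*
 * Parse order tried below: ntuple, ethertype, SYN, flow director, then
 * L2 tunnel. The first parser that accepts the rule decides which
 * hardware filter type backs the flow.
 */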
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}
	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}
	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only one global mask is supported,
				 * so all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
				    fdir_rule.flex_bytes_offset)
					goto out;
			}
		}
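		/*
		 * Note the single-global-mask constraint checked above: if
		 * an earlier rule installed, say, a fully-masked destination
		 * IP, a later rule requesting a different mask is rejected
		 * instead of silently reprogramming the shared mask
		 * registers.
		 */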
		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}
		}

		goto out;
	}
	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}
out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}

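/*
 * Typical application-side use, for illustration only (error handling
 * and the attr/pattern/actions arrays are assumed set up as in the
 * sketches above):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow rejected: %s\n",
 *		       err.message ? err.message : "unknown");
 */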
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
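/*
 * These callbacks are handed to the ethdev layer when an application
 * queries the generic flow API (in this DPDK generation, through the
 * RTE_ETH_FILTER_GENERIC branch of ixgbe_dev_filter_ctrl()), so every
 * rte_flow_validate()/create()/destroy()/flush() call on an ixgbe port
 * lands in the functions above.
 */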