4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
34 #include <sys/queue.h>
42 #include <netinet/in.h>
43 #include <rte_byteorder.h>
44 #include <rte_common.h>
45 #include <rte_cycles.h>
47 #include <rte_interrupts.h>
49 #include <rte_debug.h>
51 #include <rte_atomic.h>
52 #include <rte_branch_prediction.h>
53 #include <rte_memory.h>
54 #include <rte_memzone.h>
56 #include <rte_alarm.h>
57 #include <rte_ether.h>
58 #include <rte_ethdev.h>
59 #include <rte_malloc.h>
60 #include <rte_random.h>
62 #include <rte_hash_crc.h>
64 #include <rte_flow_driver.h>
66 #include "ixgbe_logs.h"
67 #include "base/ixgbe_api.h"
68 #include "base/ixgbe_vf.h"
69 #include "base/ixgbe_common.h"
70 #include "ixgbe_ethdev.h"
71 #include "ixgbe_bypass.h"
72 #include "ixgbe_rxtx.h"
73 #include "base/ixgbe_type.h"
74 #include "base/ixgbe_phy.h"
75 #include "rte_pmd_ixgbe.h"
78 #define IXGBE_MIN_N_TUPLE_PRIO 1
79 #define IXGBE_MAX_N_TUPLE_PRIO 7
80 #define IXGBE_MAX_FLX_SOURCE_OFF 62
83 * Endless loop will never happen with below assumption
84 * 1. there is at least one no-void item(END)
85 * 2. cur is before END.
/*
 * Return the first pattern item that is not of type VOID, scanning from
 * pattern[0] when 'cur' is NULL, otherwise from the item after 'cur'.
 * Termination relies on the assumptions stated above: the pattern holds
 * at least one non-void END item and 'cur' is before it.
 * NOTE(review): the embedded original line numbers jump (88..95), so
 * the loop advance and return statements are elided from this listing.
 */
88 const struct rte_flow_item *next_no_void_pattern(
89 const struct rte_flow_item pattern[],
90 const struct rte_flow_item *cur)
92 const struct rte_flow_item *next =
93 cur ? cur + 1 : &pattern[0];
/* Non-void item found (ultimately END) — presumably returned here. */
95 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
/*
 * Action-list counterpart of next_no_void_pattern(): return the first
 * action that is not of type VOID, scanning from actions[0] when 'cur'
 * is NULL, otherwise from the action after 'cur'.
 * NOTE(review): original line numbers jump (102..109); the loop advance
 * and return statements are elided from this listing.
 */
102 const struct rte_flow_action *next_no_void_action(
103 const struct rte_flow_action actions[],
104 const struct rte_flow_action *cur)
106 const struct rte_flow_action *next =
107 cur ? cur + 1 : &actions[0];
/* Non-void action found (ultimately END) — presumably returned here. */
109 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
116 * Please be aware there's an assumption for all the parsers.
117 * rte_flow_item is using big endian, rte_flow_attr and
118 * rte_flow_action are using CPU order.
119 * Because the pattern is used to describe the packets,
120 * normally the packets should use network order.
124 * Parse the rule to see if it is a n-tuple rule.
125 * And get the n-tuple filter info BTW.
127 * The first not void item can be ETH or IPV4.
128 * The second not void item must be IPV4 if the first one is ETH.
129 * The third not void item must be UDP or TCP.
130 * The next not void item must be END.
132 * The first not void action should be QUEUE.
133 * The next not void action should be END.
137 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
138 * dst_addr 192.167.3.50 0xFFFFFFFF
139 * next_proto_id 17 0xFF
140 * UDP/TCP/ src_port 80 0xFFFF
141 * SCTP dst_port 80 0xFFFF
143 * other members in mask and spec should set to 0x00.
144 * item->last should be NULL.
/*
 * Generic (hardware-agnostic) n-tuple parser: validate that the given
 * attr/pattern/actions describe an [ETH +] IPV4 + TCP/UDP/SCTP rule
 * terminated by END, with a single QUEUE action, and fill '*filter'
 * from the spec/mask items.  On rejection, 'error' is set and — once
 * '*filter' has been partially written — it is memset back to zero.
 * NOTE(review): the embedded original line numbers are non-contiguous;
 * return statements, several closing braces and some guard conditions
 * (NULL-argument checks, item->last range checks, egress check) are
 * elided from this listing.
 */
147 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
148 const struct rte_flow_item pattern[],
149 const struct rte_flow_action actions[],
150 struct rte_eth_ntuple_filter *filter,
151 struct rte_flow_error *error)
153 const struct rte_flow_item *item;
154 const struct rte_flow_action *act;
155 const struct rte_flow_item_ipv4 *ipv4_spec;
156 const struct rte_flow_item_ipv4 *ipv4_mask;
157 const struct rte_flow_item_tcp *tcp_spec;
158 const struct rte_flow_item_tcp *tcp_mask;
159 const struct rte_flow_item_udp *udp_spec;
160 const struct rte_flow_item_udp *udp_mask;
161 const struct rte_flow_item_sctp *sctp_spec;
162 const struct rte_flow_item_sctp *sctp_mask;
/* Reject NULL pattern (guard condition elided in this listing). */
165 rte_flow_error_set(error,
166 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
167 NULL, "NULL pattern.");
/* Reject NULL actions. */
172 rte_flow_error_set(error, EINVAL,
173 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
174 NULL, "NULL action.");
/* Reject NULL attribute. */
178 rte_flow_error_set(error, EINVAL,
179 RTE_FLOW_ERROR_TYPE_ATTR,
180 NULL, "NULL attribute.");
184 /* the first not void item can be MAC or IPv4 */
185 item = next_no_void_pattern(pattern, NULL);
187 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
188 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
189 rte_flow_error_set(error, EINVAL,
190 RTE_FLOW_ERROR_TYPE_ITEM,
191 item, "Not supported by ntuple filter");
195 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
196 /*Not supported last point for range*/
198 rte_flow_error_set(error,
200 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
201 item, "Not supported last point for range");
205 /* if the first item is MAC, the content should be NULL */
206 if (item->spec || item->mask) {
207 rte_flow_error_set(error, EINVAL,
208 RTE_FLOW_ERROR_TYPE_ITEM,
209 item, "Not supported by ntuple filter");
212 /* check if the next not void item is IPv4 */
213 item = next_no_void_pattern(pattern, item);
214 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
215 rte_flow_error_set(error,
216 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
217 item, "Not supported by ntuple filter");
222 /* get the IPv4 info */
223 if (!item->spec || !item->mask) {
224 rte_flow_error_set(error, EINVAL,
225 RTE_FLOW_ERROR_TYPE_ITEM,
226 item, "Invalid ntuple mask");
229 /*Not supported last point for range*/
231 rte_flow_error_set(error, EINVAL,
232 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
233 item, "Not supported last point for range");
238 ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
240 * Only support src & dst addresses, protocol,
241 * others should be masked.
/* Any mask bit on an unsupported IPv4 header field rejects the rule. */
243 if (ipv4_mask->hdr.version_ihl ||
244 ipv4_mask->hdr.type_of_service ||
245 ipv4_mask->hdr.total_length ||
246 ipv4_mask->hdr.packet_id ||
247 ipv4_mask->hdr.fragment_offset ||
248 ipv4_mask->hdr.time_to_live ||
249 ipv4_mask->hdr.hdr_checksum) {
250 rte_flow_error_set(error,
251 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
252 item, "Not supported by ntuple filter");
/* Copy the supported IPv4 mask and spec fields into the filter. */
256 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
257 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
258 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
260 ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
261 filter->dst_ip = ipv4_spec->hdr.dst_addr;
262 filter->src_ip = ipv4_spec->hdr.src_addr;
263 filter->proto = ipv4_spec->hdr.next_proto_id;
265 /* check if the next not void item is TCP or UDP */
266 item = next_no_void_pattern(pattern, item);
267 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
268 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
269 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
270 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
271 rte_flow_error_set(error, EINVAL,
272 RTE_FLOW_ERROR_TYPE_ITEM,
273 item, "Not supported by ntuple filter");
277 /* get the TCP/UDP info */
278 if (!item->spec || !item->mask) {
279 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
280 rte_flow_error_set(error, EINVAL,
281 RTE_FLOW_ERROR_TYPE_ITEM,
282 item, "Invalid ntuple mask");
286 /*Not supported last point for range*/
288 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
289 rte_flow_error_set(error, EINVAL,
290 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
291 item, "Not supported last point for range");
296 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
297 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
300 * Only support src & dst ports, tcp flags,
301 * others should be masked.
303 if (tcp_mask->hdr.sent_seq ||
304 tcp_mask->hdr.recv_ack ||
305 tcp_mask->hdr.data_off ||
306 tcp_mask->hdr.rx_win ||
307 tcp_mask->hdr.cksum ||
308 tcp_mask->hdr.tcp_urp) {
310 sizeof(struct rte_eth_ntuple_filter));
311 rte_flow_error_set(error, EINVAL,
312 RTE_FLOW_ERROR_TYPE_ITEM,
313 item, "Not supported by ntuple filter");
317 filter->dst_port_mask = tcp_mask->hdr.dst_port;
318 filter->src_port_mask = tcp_mask->hdr.src_port;
/* tcp_flags mask must be all-ones or all-zeros; partial masks fail. */
319 if (tcp_mask->hdr.tcp_flags == 0xFF) {
320 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
321 } else if (!tcp_mask->hdr.tcp_flags) {
322 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
324 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
325 rte_flow_error_set(error, EINVAL,
326 RTE_FLOW_ERROR_TYPE_ITEM,
327 item, "Not supported by ntuple filter");
331 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
332 filter->dst_port = tcp_spec->hdr.dst_port;
333 filter->src_port = tcp_spec->hdr.src_port;
334 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
335 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
336 udp_mask = (const struct rte_flow_item_udp *)item->mask;
339 * Only support src & dst ports,
340 * others should be masked.
342 if (udp_mask->hdr.dgram_len ||
343 udp_mask->hdr.dgram_cksum) {
345 sizeof(struct rte_eth_ntuple_filter));
346 rte_flow_error_set(error, EINVAL,
347 RTE_FLOW_ERROR_TYPE_ITEM,
348 item, "Not supported by ntuple filter");
352 filter->dst_port_mask = udp_mask->hdr.dst_port;
353 filter->src_port_mask = udp_mask->hdr.src_port;
355 udp_spec = (const struct rte_flow_item_udp *)item->spec;
356 filter->dst_port = udp_spec->hdr.dst_port;
357 filter->src_port = udp_spec->hdr.src_port;
/* SCTP branch (else-arm; the 'else {' line is elided in this listing). */
359 sctp_mask = (const struct rte_flow_item_sctp *)item->mask;
362 * Only support src & dst ports,
363 * others should be masked.
365 if (sctp_mask->hdr.tag ||
366 sctp_mask->hdr.cksum) {
368 sizeof(struct rte_eth_ntuple_filter));
369 rte_flow_error_set(error, EINVAL,
370 RTE_FLOW_ERROR_TYPE_ITEM,
371 item, "Not supported by ntuple filter");
375 filter->dst_port_mask = sctp_mask->hdr.dst_port;
376 filter->src_port_mask = sctp_mask->hdr.src_port;
378 sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
379 filter->dst_port = sctp_spec->hdr.dst_port;
380 filter->src_port = sctp_spec->hdr.src_port;
383 /* check if the next not void item is END */
384 item = next_no_void_pattern(pattern, item);
385 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
386 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
387 rte_flow_error_set(error, EINVAL,
388 RTE_FLOW_ERROR_TYPE_ITEM,
389 item, "Not supported by ntuple filter");
394 * n-tuple only supports forwarding,
395 * check if the first not void action is QUEUE.
397 act = next_no_void_action(actions, NULL);
398 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
399 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
400 rte_flow_error_set(error, EINVAL,
401 RTE_FLOW_ERROR_TYPE_ACTION,
/* NOTE(review): reports 'item' while validating an action; the END
 * check below reports 'act' — likely should be 'act'; confirm upstream.
 */
402 item, "Not supported action.");
/* Forwarding target: queue index taken from the QUEUE action conf
 * (the 'filter->queue =' left-hand side is elided in this listing). */
406 ((const struct rte_flow_action_queue *)act->conf)->index;
408 /* check if the next not void item is END */
409 act = next_no_void_action(actions, act);
410 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
411 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
412 rte_flow_error_set(error, EINVAL,
413 RTE_FLOW_ERROR_TYPE_ACTION,
414 act, "Not supported action.");
419 /* must be input direction */
420 if (!attr->ingress) {
421 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
422 rte_flow_error_set(error, EINVAL,
423 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
424 attr, "Only support ingress.");
/* Egress is not supported (the 'if (attr->egress)' guard is elided). */
430 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
431 rte_flow_error_set(error, EINVAL,
432 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
433 attr, "Not support egress.");
437 if (attr->priority > 0xFFFF) {
438 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
439 rte_flow_error_set(error, EINVAL,
440 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
441 attr, "Error priority.");
/* Cast is safe: priority <= 0xFFFF past the check above.  Priorities
 * outside [IXGBE_MIN_N_TUPLE_PRIO, IXGBE_MAX_N_TUPLE_PRIO] default to 1.
 */
444 filter->priority = (uint16_t)attr->priority;
445 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
446 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
447 filter->priority = 1;
452 /* a specific function for ixgbe because the flags is specific */
/*
 * ixgbe-specific n-tuple wrapper: run the generic parser, then reject
 * features this hardware cannot do (TCP-flag matching, priorities
 * outside [IXGBE_MIN_N_TUPLE_PRIO, IXGBE_MAX_N_TUPLE_PRIO], queue
 * indices >= nb_rx_queues) and force the fixed RTE_5TUPLE_FLAGS set.
 * NOTE(review): original line numbers jump; the 'ret' declaration,
 * return statements and some closing braces are elided here.
 */
454 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
455 const struct rte_flow_attr *attr,
456 const struct rte_flow_item pattern[],
457 const struct rte_flow_action actions[],
458 struct rte_eth_ntuple_filter *filter,
459 struct rte_flow_error *error)
462 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Macro bails out for MAC types without extended filter support. */
464 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
466 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
471 /* Ixgbe doesn't support tcp flags. */
472 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
473 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
474 rte_flow_error_set(error, EINVAL,
475 RTE_FLOW_ERROR_TYPE_ITEM,
476 NULL, "Not supported by ntuple filter");
480 /* Ixgbe doesn't support many priorities. */
481 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
482 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
483 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
484 rte_flow_error_set(error, EINVAL,
485 RTE_FLOW_ERROR_TYPE_ITEM,
486 NULL, "Priority not supported by ntuple filter");
/* Queue must exist on this port (error handling elided). */
490 if (filter->queue >= dev->data->nb_rx_queues)
493 /* fixed value for ixgbe */
494 filter->flags = RTE_5TUPLE_FLAGS;
499 * Parse the rule to see if it is an ethertype rule.
500 * And get the ethertype filter info BTW.
502 * The first not void item can be ETH.
503 * The next not void item must be END.
505 * The first not void action should be QUEUE.
506 * The next not void action should be END.
509 * ETH type 0x0807 0xFFFF
511 * other members in mask and spec should set to 0x00.
512 * item->last should be NULL.
/*
 * Generic ethertype parser: validate that attr/pattern/actions describe
 * a single-ETH-item rule (optionally matching the destination MAC) with
 * one QUEUE or DROP action, and fill '*filter' accordingly.
 * FIX: the mask dereferences below read "ð_mask" in the original —
 * HTML-entity mojibake of "&eth" ("&eth;" renders as 'ð') — restored to
 * "&eth_mask" so the address-of expressions compile again.
 * NOTE(review): original line numbers are non-contiguous; returns,
 * some closing braces and guard conditions are elided from this listing.
 */
515 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
516 const struct rte_flow_item *pattern,
517 const struct rte_flow_action *actions,
518 struct rte_eth_ethertype_filter *filter,
519 struct rte_flow_error *error)
521 const struct rte_flow_item *item;
522 const struct rte_flow_action *act;
523 const struct rte_flow_item_eth *eth_spec;
524 const struct rte_flow_item_eth *eth_mask;
525 const struct rte_flow_action_queue *act_q;
/* Reject NULL pattern / actions / attr (guards elided in listing). */
528 rte_flow_error_set(error, EINVAL,
529 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
530 NULL, "NULL pattern.");
535 rte_flow_error_set(error, EINVAL,
536 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
537 NULL, "NULL action.");
542 rte_flow_error_set(error, EINVAL,
543 RTE_FLOW_ERROR_TYPE_ATTR,
544 NULL, "NULL attribute.");
548 item = next_no_void_pattern(pattern, NULL);
549 /* The first non-void item should be MAC. */
550 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
551 rte_flow_error_set(error, EINVAL,
552 RTE_FLOW_ERROR_TYPE_ITEM,
553 item, "Not supported by ethertype filter");
557 /*Not supported last point for range*/
559 rte_flow_error_set(error, EINVAL,
560 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
561 item, "Not supported last point for range");
565 /* Get the MAC info. */
566 if (!item->spec || !item->mask) {
567 rte_flow_error_set(error, EINVAL,
568 RTE_FLOW_ERROR_TYPE_ITEM,
569 item, "Not supported by ethertype filter");
573 eth_spec = (const struct rte_flow_item_eth *)item->spec;
574 eth_mask = (const struct rte_flow_item_eth *)item->mask;
576 /* Mask bits of source MAC address must be full of 0.
577 * Mask bits of destination MAC address must be full
/* dst mask may be all-zero (ignore) or all-one (exact match). */
580 if (!is_zero_ether_addr(&eth_mask->src) ||
581 (!is_zero_ether_addr(&eth_mask->dst) &&
582 !is_broadcast_ether_addr(&eth_mask->dst))) {
583 rte_flow_error_set(error, EINVAL,
584 RTE_FLOW_ERROR_TYPE_ITEM,
585 item, "Invalid ether address mask");
/* Ether type must be matched exactly (full 16-bit mask). */
589 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
590 rte_flow_error_set(error, EINVAL,
591 RTE_FLOW_ERROR_TYPE_ITEM,
592 item, "Invalid ethertype mask");
596 /* If mask bits of destination MAC address
597 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
599 if (is_broadcast_ether_addr(&eth_mask->dst)) {
600 filter->mac_addr = eth_spec->dst;
601 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
/* else-arm (the 'else {' line is elided in this listing). */
603 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
605 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
607 /* Check if the next non-void item is END. */
608 item = next_no_void_pattern(pattern, item);
609 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
610 rte_flow_error_set(error, EINVAL,
611 RTE_FLOW_ERROR_TYPE_ITEM,
612 item, "Not supported by ethertype filter.");
/* First non-void action must be QUEUE or DROP. */
618 act = next_no_void_action(actions, NULL);
619 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
620 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
621 rte_flow_error_set(error, EINVAL,
622 RTE_FLOW_ERROR_TYPE_ACTION,
623 act, "Not supported action.");
627 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
628 act_q = (const struct rte_flow_action_queue *)act->conf;
629 filter->queue = act_q->index;
/* else-arm: DROP action (the 'else {' line is elided). */
631 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
634 /* Check if the next non-void item is END */
635 act = next_no_void_action(actions, act);
636 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
637 rte_flow_error_set(error, EINVAL,
638 RTE_FLOW_ERROR_TYPE_ACTION,
639 act, "Not supported action.");
644 /* Must be input direction */
645 if (!attr->ingress) {
646 rte_flow_error_set(error, EINVAL,
647 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
648 attr, "Only support ingress.");
/* Egress is not supported (guard elided). */
654 rte_flow_error_set(error, EINVAL,
655 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
656 attr, "Not support egress.");
/* Only priority 0 is accepted. */
661 if (attr->priority) {
662 rte_flow_error_set(error, EINVAL,
663 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
664 attr, "Not support priority.");
/* Only group 0 is accepted (guard elided). */
670 rte_flow_error_set(error, EINVAL,
671 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
672 attr, "Not support group.");
/*
 * ixgbe-specific ethertype wrapper: run the generic parser, then reject
 * configurations the hardware cannot program — MAC-address compare,
 * out-of-range queue index, IPv4/IPv6 ether types, and the DROP flag.
 * NOTE(review): the RTE_ETHTYPE_FLAGS_MAC flag is tested twice (see the
 * two identical 'filter->flags & RTE_ETHTYPE_FLAGS_MAC' checks below);
 * the second appears redundant — confirm against upstream before
 * removing.  Original line numbers jump; 'ret' declaration, returns and
 * some braces are elided from this listing.
 */
680 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
681 const struct rte_flow_attr *attr,
682 const struct rte_flow_item pattern[],
683 const struct rte_flow_action actions[],
684 struct rte_eth_ethertype_filter *filter,
685 struct rte_flow_error *error)
688 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Macro bails out for MAC types without filter support. */
690 MAC_TYPE_FILTER_SUP(hw->mac.type);
692 ret = cons_parse_ethertype_filter(attr, pattern,
693 actions, filter, error);
698 /* Ixgbe doesn't support MAC address. */
699 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
700 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
701 rte_flow_error_set(error, EINVAL,
702 RTE_FLOW_ERROR_TYPE_ITEM,
703 NULL, "Not supported by ethertype filter");
/* Queue must exist on this port. */
707 if (filter->queue >= dev->data->nb_rx_queues) {
708 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ITEM,
711 NULL, "queue index much too big");
/* IPv4/IPv6 are handled by other filter types, not ethertype. */
715 if (filter->ether_type == ETHER_TYPE_IPv4 ||
716 filter->ether_type == ETHER_TYPE_IPv6) {
717 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
718 rte_flow_error_set(error, EINVAL,
719 RTE_FLOW_ERROR_TYPE_ITEM,
720 NULL, "IPv4/IPv6 not supported by ethertype filter");
/* NOTE(review): duplicate of the MAC-flag check above. */
724 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
725 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
726 rte_flow_error_set(error, EINVAL,
727 RTE_FLOW_ERROR_TYPE_ITEM,
728 NULL, "mac compare is unsupported");
732 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
733 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
734 rte_flow_error_set(error, EINVAL,
735 RTE_FLOW_ERROR_TYPE_ITEM,
736 NULL, "drop option is unsupported");
744 * Parse the rule to see if it is a TCP SYN rule.
745 * And get the TCP SYN filter info BTW.
747 * The first not void item must be ETH.
748 * The second not void item must be IPV4 or IPV6.
749 * The third not void item must be TCP.
750 * The next not void item must be END.
752 * The first not void action should be QUEUE.
753 * The next not void action should be END.
757 * IPV4/IPV6 NULL NULL
758 * TCP tcp_flags 0x02 0xFF
760 * other members in mask and spec should set to 0x00.
761 * item->last should be NULL.
/*
 * Generic TCP SYN parser: validate that attr/pattern/actions describe
 * an [ETH +] [IPV4|IPV6 +] TCP rule whose spec/mask match exactly the
 * SYN flag, with one QUEUE action, and fill '*filter'.  On rejection,
 * 'error' is set and — once '*filter' was written — it is zeroed again.
 * NOTE(review): original line numbers are non-contiguous; returns,
 * several closing braces and some guards (NULL checks, item->last
 * checks, egress check) are elided from this listing.
 */
764 cons_parse_syn_filter(const struct rte_flow_attr *attr,
765 const struct rte_flow_item pattern[],
766 const struct rte_flow_action actions[],
767 struct rte_eth_syn_filter *filter,
768 struct rte_flow_error *error)
770 const struct rte_flow_item *item;
771 const struct rte_flow_action *act;
772 const struct rte_flow_item_tcp *tcp_spec;
773 const struct rte_flow_item_tcp *tcp_mask;
774 const struct rte_flow_action_queue *act_q;
/* Reject NULL pattern / actions / attr (guards elided). */
777 rte_flow_error_set(error, EINVAL,
778 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
779 NULL, "NULL pattern.");
784 rte_flow_error_set(error, EINVAL,
785 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
786 NULL, "NULL action.");
791 rte_flow_error_set(error, EINVAL,
792 RTE_FLOW_ERROR_TYPE_ATTR,
793 NULL, "NULL attribute.");
798 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
799 item = next_no_void_pattern(pattern, NULL);
800 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
801 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
802 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
803 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
804 rte_flow_error_set(error, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ITEM,
806 item, "Not supported by syn filter");
809 /*Not supported last point for range*/
811 rte_flow_error_set(error, EINVAL,
812 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
813 item, "Not supported last point for range");
/* Optional leading ETH item: must be wildcard (no spec/mask). */
818 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
819 /* if the item is MAC, the content should be NULL */
820 if (item->spec || item->mask) {
821 rte_flow_error_set(error, EINVAL,
822 RTE_FLOW_ERROR_TYPE_ITEM,
823 item, "Invalid SYN address mask");
827 /* check if the next not void item is IPv4 or IPv6 */
828 item = next_no_void_pattern(pattern, item);
829 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
830 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
831 rte_flow_error_set(error, EINVAL,
832 RTE_FLOW_ERROR_TYPE_ITEM,
833 item, "Not supported by syn filter");
/* Optional IP item: also must be wildcard. */
839 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
840 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
841 /* if the item is IP, the content should be NULL */
842 if (item->spec || item->mask) {
843 rte_flow_error_set(error, EINVAL,
844 RTE_FLOW_ERROR_TYPE_ITEM,
845 item, "Invalid SYN mask");
849 /* check if the next not void item is TCP */
850 item = next_no_void_pattern(pattern, item);
851 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
852 rte_flow_error_set(error, EINVAL,
853 RTE_FLOW_ERROR_TYPE_ITEM,
854 item, "Not supported by syn filter");
859 /* Get the TCP info. Only support SYN. */
860 if (!item->spec || !item->mask) {
861 rte_flow_error_set(error, EINVAL,
862 RTE_FLOW_ERROR_TYPE_ITEM,
863 item, "Invalid SYN mask");
866 /*Not supported last point for range*/
868 rte_flow_error_set(error, EINVAL,
869 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
870 item, "Not supported last point for range");
/* spec must have SYN set; mask must match only the SYN flag. */
874 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
875 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
876 if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
877 tcp_mask->hdr.src_port ||
878 tcp_mask->hdr.dst_port ||
879 tcp_mask->hdr.sent_seq ||
880 tcp_mask->hdr.recv_ack ||
881 tcp_mask->hdr.data_off ||
882 tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
883 tcp_mask->hdr.rx_win ||
884 tcp_mask->hdr.cksum ||
885 tcp_mask->hdr.tcp_urp) {
886 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
887 rte_flow_error_set(error, EINVAL,
888 RTE_FLOW_ERROR_TYPE_ITEM,
889 item, "Not supported by syn filter");
893 /* check if the next not void item is END */
894 item = next_no_void_pattern(pattern, item);
895 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
896 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
897 rte_flow_error_set(error, EINVAL,
898 RTE_FLOW_ERROR_TYPE_ITEM,
899 item, "Not supported by syn filter");
903 /* check if the first not void action is QUEUE. */
904 act = next_no_void_action(actions, NULL);
905 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
906 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
907 rte_flow_error_set(error, EINVAL,
908 RTE_FLOW_ERROR_TYPE_ACTION,
909 act, "Not supported action.");
913 act_q = (const struct rte_flow_action_queue *)act->conf;
914 filter->queue = act_q->index;
/* Bound check against the hardware queue limit. */
915 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
916 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
917 rte_flow_error_set(error, EINVAL,
918 RTE_FLOW_ERROR_TYPE_ACTION,
919 act, "Not supported action.");
923 /* check if the next not void item is END */
924 act = next_no_void_action(actions, act);
925 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
926 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
927 rte_flow_error_set(error, EINVAL,
928 RTE_FLOW_ERROR_TYPE_ACTION,
929 act, "Not supported action.");
934 /* must be input direction */
935 if (!attr->ingress) {
936 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
937 rte_flow_error_set(error, EINVAL,
938 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
939 attr, "Only support ingress.");
/* Egress is not supported (guard elided). */
945 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
946 rte_flow_error_set(error, EINVAL,
947 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
948 attr, "Not support egress.");
952 /* Support 2 priorities, the lowest or highest. */
/* 0 -> low priority, ~0 -> high priority; the hi_pri assignments on
 * the elided lines presumably set filter->hi_pri — confirm upstream. */
953 if (!attr->priority) {
955 } else if (attr->priority == (uint32_t)~0U) {
958 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
959 rte_flow_error_set(error, EINVAL,
960 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
961 attr, "Not support priority.");
/*
 * ixgbe-specific SYN-filter wrapper: run the generic parser, then
 * verify the chosen queue exists on this port.
 * NOTE(review): 'ret' declaration, the ret/queue error handling and the
 * return statement are elided (original line numbers jump 984 -> 994);
 * whether filter->queue is read before 'ret' is checked cannot be
 * determined from this listing — confirm upstream.
 */
969 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
970 const struct rte_flow_attr *attr,
971 const struct rte_flow_item pattern[],
972 const struct rte_flow_action actions[],
973 struct rte_eth_syn_filter *filter,
974 struct rte_flow_error *error)
977 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* Macro bails out for MAC types without filter support. */
979 MAC_TYPE_FILTER_SUP(hw->mac.type);
981 ret = cons_parse_syn_filter(attr, pattern,
982 actions, filter, error);
/* Queue must exist on this port (error handling elided). */
984 if (filter->queue >= dev->data->nb_rx_queues)
994 * Parse the rule to see if it is a L2 tunnel rule.
995 * And get the L2 tunnel filter info BTW.
996 * Only support E-tag now.
998 * The first not void item can be E_TAG.
999 * The next not void item must be END.
1001 * The first not void action should be QUEUE.
1002 * The next not void action should be END.
1006 * e_cid_base 0x309 0xFFF
1008 * other members in mask and spec should set to 0x00.
1009 * item->last should be NULL.
/*
 * Generic L2-tunnel (E-tag only) parser: validate a single E_TAG item
 * matching GRP + E-CID-base (mask rsvd_grp_ecid_b == be16(0x3FFF)) and
 * a single QUEUE action; fill '*filter' (tunnel type, tunnel id, pool).
 * On rejection, 'error' is set and '*filter' is zeroed.
 * NOTE(review): original line numbers are non-contiguous; returns, some
 * braces and guards (NULL checks, item->last check, egress check) are
 * elided from this listing.
 */
1012 cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
1013 const struct rte_flow_item pattern[],
1014 const struct rte_flow_action actions[],
1015 struct rte_eth_l2_tunnel_conf *filter,
1016 struct rte_flow_error *error)
1018 const struct rte_flow_item *item;
1019 const struct rte_flow_item_e_tag *e_tag_spec;
1020 const struct rte_flow_item_e_tag *e_tag_mask;
1021 const struct rte_flow_action *act;
1022 const struct rte_flow_action_queue *act_q;
/* Reject NULL pattern / actions / attr (guards elided). */
1025 rte_flow_error_set(error, EINVAL,
1026 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1027 NULL, "NULL pattern.");
1032 rte_flow_error_set(error, EINVAL,
1033 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1034 NULL, "NULL action.");
1039 rte_flow_error_set(error, EINVAL,
1040 RTE_FLOW_ERROR_TYPE_ATTR,
1041 NULL, "NULL attribute.");
1045 /* The first not void item should be e-tag. */
1046 item = next_no_void_pattern(pattern, NULL);
1047 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1048 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1049 rte_flow_error_set(error, EINVAL,
1050 RTE_FLOW_ERROR_TYPE_ITEM,
1051 item, "Not supported by L2 tunnel filter");
1055 if (!item->spec || !item->mask) {
1056 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1057 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1058 item, "Not supported by L2 tunnel filter");
1062 /*Not supported last point for range*/
1064 rte_flow_error_set(error, EINVAL,
1065 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1066 item, "Not supported last point for range");
1070 e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
1071 e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;
1073 /* Only care about GRP and E cid base. */
/* All other E-tag fields must be unmasked; grp+ecid_b mask must be the
 * full 14 bits (0x3FFF in big-endian). */
1074 if (e_tag_mask->epcp_edei_in_ecid_b ||
1075 e_tag_mask->in_ecid_e ||
1076 e_tag_mask->ecid_e ||
1077 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1078 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1079 rte_flow_error_set(error, EINVAL,
1080 RTE_FLOW_ERROR_TYPE_ITEM,
1081 item, "Not supported by L2 tunnel filter");
1085 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1087 * grp and e_cid_base are bit fields and only use 14 bits.
1088 * e-tag id is taken as little endian by HW.
1090 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1092 /* check if the next not void item is END */
1093 item = next_no_void_pattern(pattern, item);
1094 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1095 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1096 rte_flow_error_set(error, EINVAL,
1097 RTE_FLOW_ERROR_TYPE_ITEM,
1098 item, "Not supported by L2 tunnel filter");
1103 /* must be input direction */
1104 if (!attr->ingress) {
1105 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1106 rte_flow_error_set(error, EINVAL,
1107 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1108 attr, "Only support ingress.");
/* Egress is not supported (guard elided). */
1114 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1115 rte_flow_error_set(error, EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1117 attr, "Not support egress.");
/* Only priority 0 is accepted. */
1122 if (attr->priority) {
1123 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1124 rte_flow_error_set(error, EINVAL,
1125 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1126 attr, "Not support priority.");
1130 /* check if the first not void action is QUEUE. */
1131 act = next_no_void_action(actions, NULL);
1132 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1133 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1134 rte_flow_error_set(error, EINVAL,
1135 RTE_FLOW_ERROR_TYPE_ACTION,
1136 act, "Not supported action.");
/* Queue index doubles as the destination pool for E-tag rules. */
1140 act_q = (const struct rte_flow_action_queue *)act->conf;
1141 filter->pool = act_q->index;
1143 /* check if the next not void item is END */
1144 act = next_no_void_action(actions, act);
1145 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1146 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1147 rte_flow_error_set(error, EINVAL,
1148 RTE_FLOW_ERROR_TYPE_ACTION,
1149 act, "Not supported action.");
/*
 * ixgbe-specific L2-tunnel wrapper: run the generic parser, then
 * restrict the feature to X550-family MACs (X550 / X550EM_x / X550EM_a)
 * and verify the destination pool index is a valid Rx queue.
 * NOTE(review): 'ret' declaration, return statements and the pool
 * error handling are elided (original line numbers jump).
 */
1157 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1158 const struct rte_flow_attr *attr,
1159 const struct rte_flow_item pattern[],
1160 const struct rte_flow_action actions[],
1161 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1162 struct rte_flow_error *error)
1165 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1167 ret = cons_parse_l2_tn_filter(attr, pattern,
1168 actions, l2_tn_filter, error);
/* E-tag offload exists only on X550-family hardware. */
1170 if (hw->mac.type != ixgbe_mac_X550 &&
1171 hw->mac.type != ixgbe_mac_X550EM_x &&
1172 hw->mac.type != ixgbe_mac_X550EM_a) {
1173 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1174 rte_flow_error_set(error, EINVAL,
1175 RTE_FLOW_ERROR_TYPE_ITEM,
1176 NULL, "Not supported by L2 tunnel filter");
/* Pool must map to an existing Rx queue (error handling elided). */
1180 if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
1186 /* Parse to get the attr and action info of flow director rule. */
/*
 * Parse the attr and actions of a flow-director rule: ingress-only,
 * priority 0, first action QUEUE (forward) or DROP (perfect mode only),
 * optionally followed by MARK (sets rule->soft_id), then END.
 * On rejection, 'error' is set and '*rule' is zeroed.
 * NOTE(review): original line numbers are non-contiguous; returns,
 * several closing braces and the egress guard are elided from this
 * listing.
 */
1188 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1189 const struct rte_flow_action actions[],
1190 struct ixgbe_fdir_rule *rule,
1191 struct rte_flow_error *error)
1193 const struct rte_flow_action *act;
1194 const struct rte_flow_action_queue *act_q;
1195 const struct rte_flow_action_mark *mark;
1198 /* must be input direction */
1199 if (!attr->ingress) {
1200 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1201 rte_flow_error_set(error, EINVAL,
1202 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1203 attr, "Only support ingress.");
/* Egress is not supported (guard elided). */
1209 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1210 rte_flow_error_set(error, EINVAL,
1211 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1212 attr, "Not support egress.");
/* Only priority 0 is accepted. */
1217 if (attr->priority) {
1218 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1219 rte_flow_error_set(error, EINVAL,
1220 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1221 attr, "Not support priority.");
1225 /* check if the first not void action is QUEUE or DROP. */
1226 act = next_no_void_action(actions, NULL);
1227 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1228 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1229 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1230 rte_flow_error_set(error, EINVAL,
1231 RTE_FLOW_ERROR_TYPE_ACTION,
1232 act, "Not supported action.");
1236 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1237 act_q = (const struct rte_flow_action_queue *)act->conf;
1238 rule->queue = act_q->index;
/* else-arm: DROP action (the 'else {' line is elided). */
1240 /* signature mode does not support drop action. */
1241 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1242 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1243 rte_flow_error_set(error, EINVAL,
1244 RTE_FLOW_ERROR_TYPE_ACTION,
1245 act, "Not supported action.");
1248 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1251 /* check if the next not void item is MARK */
1252 act = next_no_void_action(actions, act);
1253 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1254 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1255 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1256 rte_flow_error_set(error, EINVAL,
1257 RTE_FLOW_ERROR_TYPE_ACTION,
1258 act, "Not supported action.");
/* Optional MARK action: record the soft id and advance. */
1264 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1265 mark = (const struct rte_flow_action_mark *)act->conf;
1266 rule->soft_id = mark->id;
1267 act = next_no_void_action(actions, act);
1270 /* check if the next not void item is END */
1271 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1272 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1273 rte_flow_error_set(error, EINVAL,
1274 RTE_FLOW_ERROR_TYPE_ACTION,
1275 act, "Not supported action.");
1282 /* search next no void pattern and skip fuzzy */
/*
 * Returns the next pattern item after `cur` that is neither VOID nor FUZZY
 * (pass cur == NULL to start from the beginning). Builds on
 * next_no_void_pattern(), additionally stepping over one FUZZY item.
 * NOTE(review): the early-return path for the non-FUZZY case and the final
 * return are elided from this listing.
 */
1284 const struct rte_flow_item *next_no_fuzzy_pattern(
1285 const struct rte_flow_item pattern[],
1286 const struct rte_flow_item *cur)
1288 const struct rte_flow_item *next =
1289 next_no_void_pattern(pattern, cur);
/* Non-FUZZY item found: return it directly (return elided here). */
1291 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
/* FUZZY item: skip it and take the following non-void item. */
1293 next = next_no_void_pattern(pattern, next);
/*
 * Scan the pattern for a FUZZY item, which selects flow-director signature
 * mode instead of perfect-match mode.
 * NOTE(review): most of the body (the loop structure, the sh/lh/mh checks on
 * the fuzzy spec/last/mask, and the return values) is elided from this
 * listing — presumably it returns non-zero only for a valid FUZZY item;
 * confirm against the full source.
 */
1297 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1299 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1300 const struct rte_flow_item *item;
/* Thresholds taken from the fuzzy item's spec/last/mask (checks elided). */
1301 uint32_t sh, lh, mh;
/* End of pattern reached without a FUZZY item. */
1306 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1309 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1311 (const struct rte_flow_item_fuzzy *)item->spec;
1313 (const struct rte_flow_item_fuzzy *)item->last;
1315 (const struct rte_flow_item_fuzzy *)item->mask;
1344 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
1345 * And get the flow director filter info BTW.
1346 * UDP/TCP/SCTP PATTERN:
1347 * The first not void item can be ETH or IPV4 or IPV6
1348 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1349 * The next not void item could be UDP or TCP or SCTP (optional)
1350 * The next not void item could be RAW (for flexbyte, optional)
1351 * The next not void item must be END.
1352 * A Fuzzy Match pattern can appear at any place before END.
1353 * Fuzzy Match is optional for IPV4 but is required for IPV6
1355 * The first not void item must be ETH.
1356 * The second not void item must be MAC VLAN.
1357 * The next not void item must be END.
1359 * The first not void action should be QUEUE or DROP.
1360 * The second not void optional action should be MARK,
1361 * mark_id is a uint32_t number.
1362 * The next not void action should be END.
1363 * UDP/TCP/SCTP pattern example:
1366 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1367 * dst_addr 192.167.3.50 0xFFFFFFFF
1368 * UDP/TCP/SCTP src_port 80 0xFFFF
1369 * dst_port 80 0xFFFF
1370 * FLEX relative 0 0x1
1373 * offset 12 0xFFFFFFFF
1376 * pattern[0] 0x86 0xFF
1377 * pattern[1] 0xDD 0xFF
1379 * MAC VLAN pattern example:
1382 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1383 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1384 * MAC VLAN tci 0x2016 0xEFFF
1386 * Other members in mask and spec should be set to 0x00.
1387 * Item->last should be NULL.
/*
 * Parse a non-tunnel (IP / MAC-VLAN) flow-director rule into *rule.
 *
 * Walks the pattern in a fixed order — optional ETH, then VLAN or
 * IPv4/IPv6, then optional TCP/UDP/SCTP, then optional RAW (flex bytes),
 * then END — validating at each step that only the supported header
 * fields are masked. A FUZZY item anywhere selects signature mode
 * (see signature_match()); otherwise perfect mode is used. Finishes by
 * delegating attr/action validation to ixgbe_parse_fdir_act_attr().
 * On any failure *rule is zeroed and *error is set.
 *
 * NOTE(review): this numbered listing elides many original lines
 * (returns after each rte_flow_error_set(), several `if`/`else` headers,
 * and closing braces) — see the gaps in the embedded line numbers.
 */
1390 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1391 const struct rte_flow_attr *attr,
1392 const struct rte_flow_item pattern[],
1393 const struct rte_flow_action actions[],
1394 struct ixgbe_fdir_rule *rule,
1395 struct rte_flow_error *error)
1397 const struct rte_flow_item *item;
1398 const struct rte_flow_item_eth *eth_spec;
1399 const struct rte_flow_item_eth *eth_mask;
1400 const struct rte_flow_item_ipv4 *ipv4_spec;
1401 const struct rte_flow_item_ipv4 *ipv4_mask;
1402 const struct rte_flow_item_ipv6 *ipv6_spec;
1403 const struct rte_flow_item_ipv6 *ipv6_mask;
1404 const struct rte_flow_item_tcp *tcp_spec;
1405 const struct rte_flow_item_tcp *tcp_mask;
1406 const struct rte_flow_item_udp *udp_spec;
1407 const struct rte_flow_item_udp *udp_mask;
1408 const struct rte_flow_item_sctp *sctp_spec;
1409 const struct rte_flow_item_sctp *sctp_mask;
1410 const struct rte_flow_item_vlan *vlan_spec;
1411 const struct rte_flow_item_vlan *vlan_mask;
1412 const struct rte_flow_item_raw *raw_mask;
1413 const struct rte_flow_item_raw *raw_spec;
1416 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
/* NULL-argument guards (the enclosing `if (!pattern)` etc. are elided). */
1419 rte_flow_error_set(error, EINVAL,
1420 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1421 NULL, "NULL pattern.");
1426 rte_flow_error_set(error, EINVAL,
1427 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1428 NULL, "NULL action.");
1433 rte_flow_error_set(error, EINVAL,
1434 RTE_FLOW_ERROR_TYPE_ATTR,
1435 NULL, "NULL attribute.");
1440 * Some fields may not be provided. Set spec to 0 and mask to default
1441 * value. So, we need not do anything for the not provided fields later.
1443 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1444 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1445 rule->mask.vlan_tci_mask = 0;
1446 rule->mask.flex_bytes_mask = 0;
1449 * The first not void item should be
1450 * MAC or IPv4 or TCP or UDP or SCTP.
1452 item = next_no_fuzzy_pattern(pattern, NULL);
1453 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1454 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1455 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1456 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1457 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1458 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1459 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1460 rte_flow_error_set(error, EINVAL,
1461 RTE_FLOW_ERROR_TYPE_ITEM,
1462 item, "Not supported by fdir filter");
/* A FUZZY item anywhere in the pattern selects signature mode. */
1466 if (signature_match(pattern))
1467 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1469 rule->mode = RTE_FDIR_MODE_PERFECT;
1471 /*Not supported last point for range*/
1473 rte_flow_error_set(error, EINVAL,
1474 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1475 item, "Not supported last point for range");
1479 /* Get the MAC info. */
1480 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1482 * Only support vlan and dst MAC address,
1483 * others should be masked.
1485 if (item->spec && !item->mask) {
1486 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1487 rte_flow_error_set(error, EINVAL,
1488 RTE_FLOW_ERROR_TYPE_ITEM,
1489 item, "Not supported by fdir filter");
1494 rule->b_spec = TRUE;
1495 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1497 /* Get the dst MAC. */
1498 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1499 rule->ixgbe_fdir.formatted.inner_mac[j] =
1500 eth_spec->dst.addr_bytes[j];
1507 rule->b_mask = TRUE;
1508 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1510 /* Ether type should be masked. */
1511 if (eth_mask->type ||
1512 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1513 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1514 rte_flow_error_set(error, EINVAL,
1515 RTE_FLOW_ERROR_TYPE_ITEM,
1516 item, "Not supported by fdir filter");
1520 /* If ethernet has meaning, it means MAC VLAN mode. */
1521 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1524 * src MAC address must be masked,
1525 * and don't support dst MAC address mask.
1527 for (j = 0; j < ETHER_ADDR_LEN; j++) {
1528 if (eth_mask->src.addr_bytes[j] ||
1529 eth_mask->dst.addr_bytes[j] != 0xFF) {
1531 sizeof(struct ixgbe_fdir_rule));
1532 rte_flow_error_set(error, EINVAL,
1533 RTE_FLOW_ERROR_TYPE_ITEM,
1534 item, "Not supported by fdir filter");
1539 /* When no VLAN, considered as full mask. */
1540 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1542 /* If both spec and mask are item,
1543 * it means don't care about ETH.
1548 * Check if the next not void item is vlan or ipv4.
1549 * IPv6 is not supported.
1551 item = next_no_fuzzy_pattern(pattern, item);
/* MAC-VLAN mode requires a VLAN item to follow ETH. */
1552 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1553 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1554 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1555 rte_flow_error_set(error, EINVAL,
1556 RTE_FLOW_ERROR_TYPE_ITEM,
1557 item, "Not supported by fdir filter");
1561 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
1562 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1563 rte_flow_error_set(error, EINVAL,
1564 RTE_FLOW_ERROR_TYPE_ITEM,
1565 item, "Not supported by fdir filter");
/* VLAN item: spec and mask are both mandatory. */
1571 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1572 if (!(item->spec && item->mask)) {
1573 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1574 rte_flow_error_set(error, EINVAL,
1575 RTE_FLOW_ERROR_TYPE_ITEM,
1576 item, "Not supported by fdir filter");
1580 /*Not supported last point for range*/
1582 rte_flow_error_set(error, EINVAL,
1583 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1584 item, "Not supported last point for range");
1588 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
1589 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
1591 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
/* 0xEFFF keeps PCP+VID and clears the CFI/DEI bit of the TCI mask. */
1593 rule->mask.vlan_tci_mask = vlan_mask->tci;
1594 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1595 /* More than one VLAN tag is not supported. */
1597 /* Next not void item must be END */
1598 item = next_no_fuzzy_pattern(pattern, item);
1599 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1600 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1601 rte_flow_error_set(error, EINVAL,
1602 RTE_FLOW_ERROR_TYPE_ITEM,
1603 item, "Not supported by fdir filter");
1608 /* Get the IPV4 info. */
1609 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1611 * Set the flow type even if there's no content
1612 * as we must have a flow type.
1614 rule->ixgbe_fdir.formatted.flow_type =
1615 IXGBE_ATR_FLOW_TYPE_IPV4;
1616 /*Not supported last point for range*/
1618 rte_flow_error_set(error, EINVAL,
1619 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1620 item, "Not supported last point for range");
1624 * Only care about src & dst addresses,
1625 * others should be masked.
1628 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ITEM,
1631 item, "Not supported by fdir filter");
1634 rule->b_mask = TRUE;
1636 (const struct rte_flow_item_ipv4 *)item->mask;
/* Any mask bit outside src/dst address is rejected. */
1637 if (ipv4_mask->hdr.version_ihl ||
1638 ipv4_mask->hdr.type_of_service ||
1639 ipv4_mask->hdr.total_length ||
1640 ipv4_mask->hdr.packet_id ||
1641 ipv4_mask->hdr.fragment_offset ||
1642 ipv4_mask->hdr.time_to_live ||
1643 ipv4_mask->hdr.next_proto_id ||
1644 ipv4_mask->hdr.hdr_checksum) {
1645 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1646 rte_flow_error_set(error, EINVAL,
1647 RTE_FLOW_ERROR_TYPE_ITEM,
1648 item, "Not supported by fdir filter");
1651 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1652 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1655 rule->b_spec = TRUE;
1657 (const struct rte_flow_item_ipv4 *)item->spec;
1658 rule->ixgbe_fdir.formatted.dst_ip[0] =
1659 ipv4_spec->hdr.dst_addr;
1660 rule->ixgbe_fdir.formatted.src_ip[0] =
1661 ipv4_spec->hdr.src_addr;
1665 * Check if the next not void item is
1666 * TCP or UDP or SCTP or END.
1668 item = next_no_fuzzy_pattern(pattern, item);
1669 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1670 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1671 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1672 item->type != RTE_FLOW_ITEM_TYPE_END &&
1673 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1674 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1675 rte_flow_error_set(error, EINVAL,
1676 RTE_FLOW_ERROR_TYPE_ITEM,
1677 item, "Not supported by fdir filter");
1682 /* Get the IPV6 info. */
1683 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1685 * Set the flow type even if there's no content
1686 * as we must have a flow type.
1688 rule->ixgbe_fdir.formatted.flow_type =
1689 IXGBE_ATR_FLOW_TYPE_IPV6;
/* IPv6 restrictions: */
1692 * 1. must signature match
1693 * 2. not support last
1694 * 3. mask must not null
1696 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1699 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1700 rte_flow_error_set(error, EINVAL,
1701 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1702 item, "Not supported last point for range");
1706 rule->b_mask = TRUE;
1708 (const struct rte_flow_item_ipv6 *)item->mask;
1709 if (ipv6_mask->hdr.vtc_flow ||
1710 ipv6_mask->hdr.payload_len ||
1711 ipv6_mask->hdr.proto ||
1712 ipv6_mask->hdr.hop_limits) {
1713 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1714 rte_flow_error_set(error, EINVAL,
1715 RTE_FLOW_ERROR_TYPE_ITEM,
1716 item, "Not supported by fdir filter");
/* Each address byte must be fully masked (0xFF) or fully clear;
 * the per-byte result is folded into a 16-bit bitmap. */
1720 /* check src addr mask */
1721 for (j = 0; j < 16; j++) {
1722 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1723 rule->mask.src_ipv6_mask |= 1 << j;
1724 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1725 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1726 rte_flow_error_set(error, EINVAL,
1727 RTE_FLOW_ERROR_TYPE_ITEM,
1728 item, "Not supported by fdir filter");
1733 /* check dst addr mask */
1734 for (j = 0; j < 16; j++) {
1735 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1736 rule->mask.dst_ipv6_mask |= 1 << j;
1737 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1738 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1739 rte_flow_error_set(error, EINVAL,
1740 RTE_FLOW_ERROR_TYPE_ITEM,
1741 item, "Not supported by fdir filter");
1747 rule->b_spec = TRUE;
1749 (const struct rte_flow_item_ipv6 *)item->spec;
1750 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1751 ipv6_spec->hdr.src_addr, 16);
1752 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1753 ipv6_spec->hdr.dst_addr, 16);
1757 * Check if the next not void item is
1758 * TCP or UDP or SCTP or END.
1760 item = next_no_fuzzy_pattern(pattern, item);
1761 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1762 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1763 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1764 item->type != RTE_FLOW_ITEM_TYPE_END &&
1765 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1766 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1767 rte_flow_error_set(error, EINVAL,
1768 RTE_FLOW_ERROR_TYPE_ITEM,
1769 item, "Not supported by fdir filter");
1774 /* Get the TCP info. */
1775 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1777 * Set the flow type even if there's no content
1778 * as we must have a flow type.
1780 rule->ixgbe_fdir.formatted.flow_type |=
1781 IXGBE_ATR_L4TYPE_TCP;
1782 /*Not supported last point for range*/
1784 rte_flow_error_set(error, EINVAL,
1785 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786 item, "Not supported last point for range");
1790 * Only care about src & dst ports,
1791 * others should be masked.
1794 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1795 rte_flow_error_set(error, EINVAL,
1796 RTE_FLOW_ERROR_TYPE_ITEM,
1797 item, "Not supported by fdir filter");
1800 rule->b_mask = TRUE;
1801 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
/* Every TCP header field other than the ports must be unmasked. */
1802 if (tcp_mask->hdr.sent_seq ||
1803 tcp_mask->hdr.recv_ack ||
1804 tcp_mask->hdr.data_off ||
1805 tcp_mask->hdr.tcp_flags ||
1806 tcp_mask->hdr.rx_win ||
1807 tcp_mask->hdr.cksum ||
1808 tcp_mask->hdr.tcp_urp) {
1809 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1810 rte_flow_error_set(error, EINVAL,
1811 RTE_FLOW_ERROR_TYPE_ITEM,
1812 item, "Not supported by fdir filter");
1815 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
1816 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
1819 rule->b_spec = TRUE;
1820 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
1821 rule->ixgbe_fdir.formatted.src_port =
1822 tcp_spec->hdr.src_port;
1823 rule->ixgbe_fdir.formatted.dst_port =
1824 tcp_spec->hdr.dst_port;
/* After TCP only RAW (flex bytes) or END may follow. */
1827 item = next_no_fuzzy_pattern(pattern, item);
1828 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1829 item->type != RTE_FLOW_ITEM_TYPE_END) {
1830 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1831 rte_flow_error_set(error, EINVAL,
1832 RTE_FLOW_ERROR_TYPE_ITEM,
1833 item, "Not supported by fdir filter");
1839 /* Get the UDP info */
1840 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
1842 * Set the flow type even if there's no content
1843 * as we must have a flow type.
1845 rule->ixgbe_fdir.formatted.flow_type |=
1846 IXGBE_ATR_L4TYPE_UDP;
1847 /*Not supported last point for range*/
1849 rte_flow_error_set(error, EINVAL,
1850 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1851 item, "Not supported last point for range");
1855 * Only care about src & dst ports,
1856 * others should be masked.
1859 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1860 rte_flow_error_set(error, EINVAL,
1861 RTE_FLOW_ERROR_TYPE_ITEM,
1862 item, "Not supported by fdir filter");
1865 rule->b_mask = TRUE;
1866 udp_mask = (const struct rte_flow_item_udp *)item->mask;
1867 if (udp_mask->hdr.dgram_len ||
1868 udp_mask->hdr.dgram_cksum) {
1869 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1870 rte_flow_error_set(error, EINVAL,
1871 RTE_FLOW_ERROR_TYPE_ITEM,
1872 item, "Not supported by fdir filter");
1875 rule->mask.src_port_mask = udp_mask->hdr.src_port;
1876 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
1879 rule->b_spec = TRUE;
1880 udp_spec = (const struct rte_flow_item_udp *)item->spec;
1881 rule->ixgbe_fdir.formatted.src_port =
1882 udp_spec->hdr.src_port;
1883 rule->ixgbe_fdir.formatted.dst_port =
1884 udp_spec->hdr.dst_port;
/* After UDP only RAW (flex bytes) or END may follow. */
1887 item = next_no_fuzzy_pattern(pattern, item);
1888 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1889 item->type != RTE_FLOW_ITEM_TYPE_END) {
1890 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1891 rte_flow_error_set(error, EINVAL,
1892 RTE_FLOW_ERROR_TYPE_ITEM,
1893 item, "Not supported by fdir filter");
1899 /* Get the SCTP info */
1900 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
1902 * Set the flow type even if there's no content
1903 * as we must have a flow type.
1905 rule->ixgbe_fdir.formatted.flow_type |=
1906 IXGBE_ATR_L4TYPE_SCTP;
1907 /*Not supported last point for range*/
1909 rte_flow_error_set(error, EINVAL,
1910 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1911 item, "Not supported last point for range");
1915 /* Only the x550 family supports matching the SCTP ports. */
1916 if (hw->mac.type == ixgbe_mac_X550 ||
1917 hw->mac.type == ixgbe_mac_X550EM_x ||
1918 hw->mac.type == ixgbe_mac_X550EM_a) {
1920 * Only care about src & dst ports,
1921 * others should be masked.
1924 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925 rte_flow_error_set(error, EINVAL,
1926 RTE_FLOW_ERROR_TYPE_ITEM,
1927 item, "Not supported by fdir filter");
1930 rule->b_mask = TRUE;
1932 (const struct rte_flow_item_sctp *)item->mask;
1933 if (sctp_mask->hdr.tag ||
1934 sctp_mask->hdr.cksum) {
1935 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1936 rte_flow_error_set(error, EINVAL,
1937 RTE_FLOW_ERROR_TYPE_ITEM,
1938 item, "Not supported by fdir filter");
1941 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
1942 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
1945 rule->b_spec = TRUE;
1947 (const struct rte_flow_item_sctp *)item->spec;
1948 rule->ixgbe_fdir.formatted.src_port =
1949 sctp_spec->hdr.src_port;
1950 rule->ixgbe_fdir.formatted.dst_port =
1951 sctp_spec->hdr.dst_port;
1953 /* On other MAC types even an SCTP port match is not supported. */
1956 (const struct rte_flow_item_sctp *)item->mask;
1958 (sctp_mask->hdr.src_port ||
1959 sctp_mask->hdr.dst_port ||
1960 sctp_mask->hdr.tag ||
1961 sctp_mask->hdr.cksum)) {
1962 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1963 rte_flow_error_set(error, EINVAL,
1964 RTE_FLOW_ERROR_TYPE_ITEM,
1965 item, "Not supported by fdir filter");
/* After SCTP only RAW (flex bytes) or END may follow. */
1970 item = next_no_fuzzy_pattern(pattern, item);
1971 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
1972 item->type != RTE_FLOW_ITEM_TYPE_END) {
1973 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974 rte_flow_error_set(error, EINVAL,
1975 RTE_FLOW_ERROR_TYPE_ITEM,
1976 item, "Not supported by fdir filter");
1981 /* Get the flex byte info */
1982 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
1983 /* Not supported last point for range*/
1985 rte_flow_error_set(error, EINVAL,
1986 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1987 item, "Not supported last point for range");
1990 /* mask should not be null */
1991 if (!item->mask || !item->spec) {
1992 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993 rte_flow_error_set(error, EINVAL,
1994 RTE_FLOW_ERROR_TYPE_ITEM,
1995 item, "Not supported by fdir filter");
1999 raw_mask = (const struct rte_flow_item_raw *)item->mask;
/* All RAW mask fields must be fully set (exact-match only). */
2002 if (raw_mask->relative != 0x1 ||
2003 raw_mask->search != 0x1 ||
2004 raw_mask->reserved != 0x0 ||
2005 (uint32_t)raw_mask->offset != 0xffffffff ||
2006 raw_mask->limit != 0xffff ||
2007 raw_mask->length != 0xffff) {
2008 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2009 rte_flow_error_set(error, EINVAL,
2010 RTE_FLOW_ERROR_TYPE_ITEM,
2011 item, "Not supported by fdir filter");
2015 raw_spec = (const struct rte_flow_item_raw *)item->spec;
/* Flex bytes: absolute, even offset within hardware reach, two bytes. */
2018 if (raw_spec->relative != 0 ||
2019 raw_spec->search != 0 ||
2020 raw_spec->reserved != 0 ||
2021 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2022 raw_spec->offset % 2 ||
2023 raw_spec->limit != 0 ||
2024 raw_spec->length != 2 ||
2025 /* pattern can't be 0xffff */
2026 (raw_spec->pattern[0] == 0xff &&
2027 raw_spec->pattern[1] == 0xff)) {
2028 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2029 rte_flow_error_set(error, EINVAL,
2030 RTE_FLOW_ERROR_TYPE_ITEM,
2031 item, "Not supported by fdir filter");
2035 /* check pattern mask */
2036 if (raw_mask->pattern[0] != 0xff ||
2037 raw_mask->pattern[1] != 0xff) {
2038 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2039 rte_flow_error_set(error, EINVAL,
2040 RTE_FLOW_ERROR_TYPE_ITEM,
2041 item, "Not supported by fdir filter");
/* Store the two flex bytes little-end first, plus their offset. */
2045 rule->mask.flex_bytes_mask = 0xffff;
2046 rule->ixgbe_fdir.formatted.flex_bytes =
2047 (((uint16_t)raw_spec->pattern[1]) << 8) |
2048 raw_spec->pattern[0];
2049 rule->flex_bytes_offset = raw_spec->offset;
2052 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2053 /* check if the next not void item is END */
2054 item = next_no_fuzzy_pattern(pattern, item);
2055 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2056 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2057 rte_flow_error_set(error, EINVAL,
2058 RTE_FLOW_ERROR_TYPE_ITEM,
2059 item, "Not supported by fdir filter");
/* Pattern accepted; validate the attributes and actions last. */
2064 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2067 #define NVGRE_PROTOCOL 0x6558
2070 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2071 * And get the flow director filter info BTW.
2073 * The first not void item must be ETH.
2074 * The second not void item must be IPV4/ IPV6.
2075 * The third not void item must be NVGRE.
2076 * The next not void item must be END.
2078 * The first not void item must be ETH.
2079 * The second not void item must be IPV4/ IPV6.
2080 * The third not void item must be NVGRE.
2081 * The next not void item must be END.
2083 * The first not void action should be QUEUE or DROP.
2084 * The second not void optional action should be MARK,
2085 * mark_id is a uint32_t number.
2086 * The next not void action should be END.
2087 * VxLAN pattern example:
2090 * IPV4/IPV6 NULL NULL
2092 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2093 * MAC VLAN tci 0x2016 0xEFFF
2095 * NVGRE pattern example:
2098 * IPV4/IPV6 NULL NULL
2099 * NVGRE protocol 0x6558 0xFFFF
2100 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2101 * MAC VLAN tci 0x2016 0xEFFF
2103 * other members in mask and spec should be set to 0x00.
2104 * item->last should be NULL.
2107 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2108 const struct rte_flow_item pattern[],
2109 const struct rte_flow_action actions[],
2110 struct ixgbe_fdir_rule *rule,
2111 struct rte_flow_error *error)
2113 const struct rte_flow_item *item;
2114 const struct rte_flow_item_vxlan *vxlan_spec;
2115 const struct rte_flow_item_vxlan *vxlan_mask;
2116 const struct rte_flow_item_nvgre *nvgre_spec;
2117 const struct rte_flow_item_nvgre *nvgre_mask;
2118 const struct rte_flow_item_eth *eth_spec;
2119 const struct rte_flow_item_eth *eth_mask;
2120 const struct rte_flow_item_vlan *vlan_spec;
2121 const struct rte_flow_item_vlan *vlan_mask;
2125 rte_flow_error_set(error, EINVAL,
2126 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2127 NULL, "NULL pattern.");
2132 rte_flow_error_set(error, EINVAL,
2133 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2134 NULL, "NULL action.");
2139 rte_flow_error_set(error, EINVAL,
2140 RTE_FLOW_ERROR_TYPE_ATTR,
2141 NULL, "NULL attribute.");
2146 * Some fields may not be provided. Set spec to 0 and mask to default
2147 * value. So, we need not do anything for the not provided fields later.
2149 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2151 rule->mask.vlan_tci_mask = 0;
2154 * The first not void item should be
2155 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2157 item = next_no_void_pattern(pattern, NULL);
2158 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2159 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2160 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2161 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2162 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2163 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2164 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165 rte_flow_error_set(error, EINVAL,
2166 RTE_FLOW_ERROR_TYPE_ITEM,
2167 item, "Not supported by fdir filter");
2171 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2174 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2175 /* Only used to describe the protocol stack. */
2176 if (item->spec || item->mask) {
2177 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178 rte_flow_error_set(error, EINVAL,
2179 RTE_FLOW_ERROR_TYPE_ITEM,
2180 item, "Not supported by fdir filter");
2183 /* Not supported last point for range*/
2185 rte_flow_error_set(error, EINVAL,
2186 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2187 item, "Not supported last point for range");
2191 /* Check if the next not void item is IPv4 or IPv6. */
2192 item = next_no_void_pattern(pattern, item);
2193 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2194 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2195 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2196 rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ITEM,
2198 item, "Not supported by fdir filter");
2204 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2205 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2206 /* Only used to describe the protocol stack. */
2207 if (item->spec || item->mask) {
2208 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2209 rte_flow_error_set(error, EINVAL,
2210 RTE_FLOW_ERROR_TYPE_ITEM,
2211 item, "Not supported by fdir filter");
2214 /*Not supported last point for range*/
2216 rte_flow_error_set(error, EINVAL,
2217 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2218 item, "Not supported last point for range");
2222 /* Check if the next not void item is UDP or NVGRE. */
2223 item = next_no_void_pattern(pattern, item);
2224 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2225 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2226 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2227 rte_flow_error_set(error, EINVAL,
2228 RTE_FLOW_ERROR_TYPE_ITEM,
2229 item, "Not supported by fdir filter");
2235 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2236 /* Only used to describe the protocol stack. */
2237 if (item->spec || item->mask) {
2238 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2239 rte_flow_error_set(error, EINVAL,
2240 RTE_FLOW_ERROR_TYPE_ITEM,
2241 item, "Not supported by fdir filter");
2244 /*Not supported last point for range*/
2246 rte_flow_error_set(error, EINVAL,
2247 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2248 item, "Not supported last point for range");
2252 /* Check if the next not void item is VxLAN. */
2253 item = next_no_void_pattern(pattern, item);
2254 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2255 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2256 rte_flow_error_set(error, EINVAL,
2257 RTE_FLOW_ERROR_TYPE_ITEM,
2258 item, "Not supported by fdir filter");
2263 /* Get the VxLAN info */
2264 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2265 rule->ixgbe_fdir.formatted.tunnel_type =
2266 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2268 /* Only care about VNI, others should be masked. */
2270 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2271 rte_flow_error_set(error, EINVAL,
2272 RTE_FLOW_ERROR_TYPE_ITEM,
2273 item, "Not supported by fdir filter");
2276 /*Not supported last point for range*/
2278 rte_flow_error_set(error, EINVAL,
2279 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2280 item, "Not supported last point for range");
2283 rule->b_mask = TRUE;
2285 /* Tunnel type is always meaningful. */
2286 rule->mask.tunnel_type_mask = 1;
2289 (const struct rte_flow_item_vxlan *)item->mask;
2290 if (vxlan_mask->flags) {
2291 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2292 rte_flow_error_set(error, EINVAL,
2293 RTE_FLOW_ERROR_TYPE_ITEM,
2294 item, "Not supported by fdir filter");
2297 /* VNI must be totally masked or not. */
2298 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2299 vxlan_mask->vni[2]) &&
2300 ((vxlan_mask->vni[0] != 0xFF) ||
2301 (vxlan_mask->vni[1] != 0xFF) ||
2302 (vxlan_mask->vni[2] != 0xFF))) {
2303 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2304 rte_flow_error_set(error, EINVAL,
2305 RTE_FLOW_ERROR_TYPE_ITEM,
2306 item, "Not supported by fdir filter");
2310 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2311 RTE_DIM(vxlan_mask->vni));
2314 rule->b_spec = TRUE;
2315 vxlan_spec = (const struct rte_flow_item_vxlan *)
2317 rte_memcpy(((uint8_t *)
2318 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2319 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2320 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2321 rule->ixgbe_fdir.formatted.tni_vni);
2325 /* Get the NVGRE info */
2326 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2327 rule->ixgbe_fdir.formatted.tunnel_type =
2328 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2331 * Only care about flags0, flags1, protocol and TNI,
2332 * others should be masked.
2335 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2336 rte_flow_error_set(error, EINVAL,
2337 RTE_FLOW_ERROR_TYPE_ITEM,
2338 item, "Not supported by fdir filter");
2341 /*Not supported last point for range*/
2343 rte_flow_error_set(error, EINVAL,
2344 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2345 item, "Not supported last point for range");
2348 rule->b_mask = TRUE;
2350 /* Tunnel type is always meaningful. */
2351 rule->mask.tunnel_type_mask = 1;
2354 (const struct rte_flow_item_nvgre *)item->mask;
2355 if (nvgre_mask->flow_id) {
2356 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2357 rte_flow_error_set(error, EINVAL,
2358 RTE_FLOW_ERROR_TYPE_ITEM,
2359 item, "Not supported by fdir filter");
2362 if (nvgre_mask->c_k_s_rsvd0_ver !=
2363 rte_cpu_to_be_16(0x3000) ||
2364 nvgre_mask->protocol != 0xFFFF) {
2365 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2366 rte_flow_error_set(error, EINVAL,
2367 RTE_FLOW_ERROR_TYPE_ITEM,
2368 item, "Not supported by fdir filter");
2371 /* TNI must be totally masked or not. */
2372 if (nvgre_mask->tni[0] &&
2373 ((nvgre_mask->tni[0] != 0xFF) ||
2374 (nvgre_mask->tni[1] != 0xFF) ||
2375 (nvgre_mask->tni[2] != 0xFF))) {
2376 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2377 rte_flow_error_set(error, EINVAL,
2378 RTE_FLOW_ERROR_TYPE_ITEM,
2379 item, "Not supported by fdir filter");
2382 /* tni is a 24-bits bit field */
2383 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2384 RTE_DIM(nvgre_mask->tni));
2385 rule->mask.tunnel_id_mask <<= 8;
2388 rule->b_spec = TRUE;
2390 (const struct rte_flow_item_nvgre *)item->spec;
2391 if (nvgre_spec->c_k_s_rsvd0_ver !=
2392 rte_cpu_to_be_16(0x2000) ||
2393 nvgre_spec->protocol !=
2394 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2395 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2396 rte_flow_error_set(error, EINVAL,
2397 RTE_FLOW_ERROR_TYPE_ITEM,
2398 item, "Not supported by fdir filter");
2401 /* tni is a 24-bits bit field */
2402 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2403 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2404 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2408 /* check if the next not void item is MAC */
2409 item = next_no_void_pattern(pattern, item);
2410 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2411 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412 rte_flow_error_set(error, EINVAL,
2413 RTE_FLOW_ERROR_TYPE_ITEM,
2414 item, "Not supported by fdir filter");
2419 * Only support vlan and dst MAC address,
2420 * others should be masked.
2424 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2425 rte_flow_error_set(error, EINVAL,
2426 RTE_FLOW_ERROR_TYPE_ITEM,
2427 item, "Not supported by fdir filter");
2430 /*Not supported last point for range*/
2432 rte_flow_error_set(error, EINVAL,
2433 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2434 item, "Not supported last point for range");
2437 rule->b_mask = TRUE;
2438 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2440 /* Ether type should be masked. */
2441 if (eth_mask->type) {
2442 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443 rte_flow_error_set(error, EINVAL,
2444 RTE_FLOW_ERROR_TYPE_ITEM,
2445 item, "Not supported by fdir filter");
2449 /* src MAC address should be masked. */
2450 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2451 if (eth_mask->src.addr_bytes[j]) {
2453 sizeof(struct ixgbe_fdir_rule));
2454 rte_flow_error_set(error, EINVAL,
2455 RTE_FLOW_ERROR_TYPE_ITEM,
2456 item, "Not supported by fdir filter");
2460 rule->mask.mac_addr_byte_mask = 0;
2461 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2462 /* It's a per byte mask. */
2463 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2464 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2465 } else if (eth_mask->dst.addr_bytes[j]) {
2466 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2467 rte_flow_error_set(error, EINVAL,
2468 RTE_FLOW_ERROR_TYPE_ITEM,
2469 item, "Not supported by fdir filter");
2474 /* When no vlan, considered as full mask. */
2475 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2478 rule->b_spec = TRUE;
2479 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2481 /* Get the dst MAC. */
2482 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2483 rule->ixgbe_fdir.formatted.inner_mac[j] =
2484 eth_spec->dst.addr_bytes[j];
2489 * Check if the next not void item is vlan or ipv4.
2490 * IPv6 is not supported.
2492 item = next_no_void_pattern(pattern, item);
2493 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2494 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2495 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2496 rte_flow_error_set(error, EINVAL,
2497 RTE_FLOW_ERROR_TYPE_ITEM,
2498 item, "Not supported by fdir filter");
2501 /*Not supported last point for range*/
2503 rte_flow_error_set(error, EINVAL,
2504 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2505 item, "Not supported last point for range");
2509 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2510 if (!(item->spec && item->mask)) {
2511 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2512 rte_flow_error_set(error, EINVAL,
2513 RTE_FLOW_ERROR_TYPE_ITEM,
2514 item, "Not supported by fdir filter");
2518 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2519 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2521 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2523 rule->mask.vlan_tci_mask = vlan_mask->tci;
2524 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2525 /* More than one tags are not supported. */
2527 /* check if the next not void item is END */
2528 item = next_no_void_pattern(pattern, item);
2530 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2531 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2532 rte_flow_error_set(error, EINVAL,
2533 RTE_FLOW_ERROR_TYPE_ITEM,
2534 item, "Not supported by fdir filter");
2540 * If the tags is 0, it means don't care about the VLAN.
2544 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
/*
 * Parse a generic rte_flow rule into an ixgbe flow director (fdir) rule.
 *
 * Tries the non-tunnel ("normal") parser first and then the tunnel
 * parser, then applies device-level checks: supported MAC type, an
 * 82599-specific DROP/port-mask restriction, fdir-mode consistency,
 * and a valid RX queue index.
 *
 * @param dev     device whose MAC type and fdir configuration are checked
 * @param attr    flow rule attributes
 * @param pattern flow item pattern array
 * @param actions flow action array
 * @param rule    output: parsed fdir rule
 * @param error   rte_flow error reporting structure
 *
 * NOTE(review): this excerpt elides several lines of the original file
 * (returns and braces between the numbered statements).
 */
2548 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2549 const struct rte_flow_attr *attr,
2550 const struct rte_flow_item pattern[],
2551 const struct rte_flow_action actions[],
2552 struct ixgbe_fdir_rule *rule,
2553 struct rte_flow_error *error)
2556 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2557 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
/* fdir is only handled for these MAC generations. */
2559 if (hw->mac.type != ixgbe_mac_82599EB &&
2560 hw->mac.type != ixgbe_mac_X540 &&
2561 hw->mac.type != ixgbe_mac_X550 &&
2562 hw->mac.type != ixgbe_mac_X550EM_x &&
2563 hw->mac.type != ixgbe_mac_X550EM_a)
/* First attempt: parse as a non-tunnel fdir rule. */
2566 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2567 actions, rule, error);
/* Second attempt: parse as a tunnel (VxLAN/NVGRE) fdir rule. */
2572 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2573 actions, rule, error);
/*
 * 82599 special case: a DROP rule combined with nonzero L4
 * src/dst port masks is singled out here (handling elided in
 * this excerpt).
 */
2580 if (hw->mac.type == ixgbe_mac_82599EB &&
2581 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2582 (rule->mask.src_port_mask != 0 ||
2583 rule->mask.dst_port_mask != 0))
/* The parsed rule's mode must match the device's configured fdir mode. */
2586 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2587 fdir_mode != rule->mode)
/* The target RX queue must exist on this port. */
2590 if (rule->queue >= dev->data->nb_rx_queues)
/*
 * Drain every driver-static filter bookkeeping list (ntuple, ethertype,
 * SYN, L2 tunnel, fdir) plus the flow-memory list, freeing each element.
 * For the flow-memory list, the tracked rte_flow object is freed as well.
 * This only releases software state; it does not touch hardware.
 */
2597 ixgbe_filterlist_flush(void)
2599 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2600 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2601 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2602 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2603 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2604 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Pop-and-free until each TAILQ is empty. */
2606 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2607 TAILQ_REMOVE(&filter_ntuple_list,
2610 rte_free(ntuple_filter_ptr);
2613 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2614 TAILQ_REMOVE(&filter_ethertype_list,
2615 ethertype_filter_ptr,
2617 rte_free(ethertype_filter_ptr);
2620 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2621 TAILQ_REMOVE(&filter_syn_list,
2624 rte_free(syn_filter_ptr);
2627 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2628 TAILQ_REMOVE(&filter_l2_tunnel_list,
2631 rte_free(l2_tn_filter_ptr);
2634 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2635 TAILQ_REMOVE(&filter_fdir_list,
2638 rte_free(fdir_rule_ptr);
2641 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2642 TAILQ_REMOVE(&ixgbe_flow_list,
/* Free both the rte_flow handle and its tracking node. */
2645 rte_free(ixgbe_flow_mem_ptr->flow);
2646 rte_free(ixgbe_flow_mem_ptr);
2651 * Create or destroy a flow rule.
2652 * Theoretically one rule can match more than one filter.
2653 * We will let it use the filter which it hits first.
2654 * So, the sequence matters.
/*
 * Create a flow rule: try each filter parser in fixed order (ntuple,
 * ethertype, SYN, fdir, L2 tunnel); on the first successful parse,
 * program the hardware, record the filter on the matching bookkeeping
 * list, and hand back the rte_flow handle.  The out: path unlinks and
 * frees the tracking node and reports the failure via @error.
 *
 * @param dev     port to program
 * @param attr    flow rule attributes
 * @param pattern flow item pattern array
 * @param actions flow action array
 * @param error   rte_flow error reporting structure
 *
 * NOTE(review): some lines are elided in this excerpt; also, several
 * lines below show "ð..." which looks like HTML-entity mangling of
 * "&eth..." (e.g. "&ethertype_filter") — confirm against the original.
 */
2656 static struct rte_flow *
2657 ixgbe_flow_create(struct rte_eth_dev *dev,
2658 const struct rte_flow_attr *attr,
2659 const struct rte_flow_item pattern[],
2660 const struct rte_flow_action actions[],
2661 struct rte_flow_error *error)
2664 struct rte_eth_ntuple_filter ntuple_filter;
2665 struct rte_eth_ethertype_filter ethertype_filter;
2666 struct rte_eth_syn_filter syn_filter;
2667 struct ixgbe_fdir_rule fdir_rule;
2668 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2669 struct ixgbe_hw_fdir_info *fdir_info =
2670 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2671 struct rte_flow *flow = NULL;
2672 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2673 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2674 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2675 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2676 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2677 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
/* Allocate the flow handle; on failure this returns the NULL flow. */
2679 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2681 PMD_DRV_LOG(ERR, "failed to allocate memory");
2682 return (struct rte_flow *)flow;
/* Track the handle on the global flow list so flush can free it. */
2684 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2685 sizeof(struct ixgbe_flow_mem), 0);
2686 if (!ixgbe_flow_mem_ptr) {
2687 PMD_DRV_LOG(ERR, "failed to allocate memory");
2691 ixgbe_flow_mem_ptr->flow = flow;
2692 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2693 ixgbe_flow_mem_ptr, entries);
/* 1) ntuple filter. */
2695 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2696 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2697 actions, &ntuple_filter, error);
2699 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2701 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2702 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2703 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2705 sizeof(struct rte_eth_ntuple_filter));
2706 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2707 ntuple_filter_ptr, entries);
2708 flow->rule = ntuple_filter_ptr;
2709 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
/* 2) ethertype filter. */
2715 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2716 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2717 actions, ðertype_filter, error);
2719 ret = ixgbe_add_del_ethertype_filter(dev,
2720 ðertype_filter, TRUE);
2722 ethertype_filter_ptr = rte_zmalloc(
2723 "ixgbe_ethertype_filter",
2724 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2725 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2727 sizeof(struct rte_eth_ethertype_filter));
2728 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2729 ethertype_filter_ptr, entries);
2730 flow->rule = ethertype_filter_ptr;
2731 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
/* 3) TCP SYN filter. */
2737 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2738 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2739 actions, &syn_filter, error);
2741 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2743 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2744 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2745 (void)rte_memcpy(&syn_filter_ptr->filter_info,
2747 sizeof(struct rte_eth_syn_filter));
2748 TAILQ_INSERT_TAIL(&filter_syn_list,
2751 flow->rule = syn_filter_ptr;
2752 flow->filter_type = RTE_ETH_FILTER_SYN;
/* 4) flow director rule. */
2758 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2759 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2760 actions, &fdir_rule, error);
2762 /* A mask cannot be deleted. */
2763 if (fdir_rule.b_mask) {
2764 if (!fdir_info->mask_added) {
2765 /* It's the first time the mask is set. */
2766 rte_memcpy(&fdir_info->mask,
2768 sizeof(struct ixgbe_hw_fdir_mask));
2769 fdir_info->flex_bytes_offset =
2770 fdir_rule.flex_bytes_offset;
2772 if (fdir_rule.mask.flex_bytes_mask)
2773 ixgbe_fdir_set_flexbytes_offset(dev,
2774 fdir_rule.flex_bytes_offset);
2776 ret = ixgbe_fdir_set_input_mask(dev);
2780 fdir_info->mask_added = TRUE;
2783 * Only support one global mask,
2784 * all the masks should be the same.
2786 ret = memcmp(&fdir_info->mask,
2788 sizeof(struct ixgbe_hw_fdir_mask));
2792 if (fdir_info->flex_bytes_offset !=
2793 fdir_rule.flex_bytes_offset)
/* Only program the HW entry when the rule carries a spec. */
2798 if (fdir_rule.b_spec) {
2799 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2802 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2803 sizeof(struct ixgbe_fdir_rule_ele), 0);
2804 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2806 sizeof(struct ixgbe_fdir_rule));
2807 TAILQ_INSERT_TAIL(&filter_fdir_list,
2808 fdir_rule_ptr, entries);
2809 flow->rule = fdir_rule_ptr;
2810 flow->filter_type = RTE_ETH_FILTER_FDIR;
/* 5) L2 tunnel filter. */
2822 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2823 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2824 actions, &l2_tn_filter, error);
2826 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2828 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2829 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2830 (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2832 sizeof(struct rte_eth_l2_tunnel_conf));
2833 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2834 l2_tn_filter_ptr, entries);
2835 flow->rule = l2_tn_filter_ptr;
2836 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
/* Failure path: unlink the tracking node, report, and free it. */
2842 TAILQ_REMOVE(&ixgbe_flow_list,
2843 ixgbe_flow_mem_ptr, entries);
2844 rte_flow_error_set(error, -ret,
2845 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2846 "Failed to create flow.");
2847 rte_free(ixgbe_flow_mem_ptr);
2853 * Check if the flow rule is supported by ixgbe.
2854 * It only checks the format. It doesn't guarantee that the rule can be
2855 * programmed into the HW, because there may not be enough room for the rule.
/*
 * Validate a flow rule without programming hardware: run the same
 * parser sequence as ixgbe_flow_create() (ntuple, ethertype, SYN,
 * fdir, L2 tunnel) against zeroed scratch filters.  The early-return
 * lines between the parser calls are elided in this excerpt;
 * presumably each successful parse returns immediately — verify
 * against the full source.
 *
 * @param dev     port the rule would apply to
 * @param attr    flow rule attributes
 * @param pattern flow item pattern array
 * @param actions flow action array
 * @param error   rte_flow error reporting structure
 *
 * NOTE(review): "ð..." below looks like HTML-entity mangling of
 * "&eth..." (i.e. "&ethertype_filter") — confirm against the original.
 */
2858 ixgbe_flow_validate(struct rte_eth_dev *dev,
2859 const struct rte_flow_attr *attr,
2860 const struct rte_flow_item pattern[],
2861 const struct rte_flow_action actions[],
2862 struct rte_flow_error *error)
2864 struct rte_eth_ntuple_filter ntuple_filter;
2865 struct rte_eth_ethertype_filter ethertype_filter;
2866 struct rte_eth_syn_filter syn_filter;
2867 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2868 struct ixgbe_fdir_rule fdir_rule;
2871 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2872 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2873 actions, &ntuple_filter, error);
2877 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2878 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2879 actions, ðertype_filter, error);
2883 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2884 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2885 actions, &syn_filter, error);
2889 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2890 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2891 actions, &fdir_rule, error);
2895 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2896 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2897 actions, &l2_tn_filter, error);
2902 /* Destroy a flow rule on ixgbe. */
/*
 * Destroy one flow rule: dispatch on the filter type recorded at
 * creation time, remove the filter from hardware, unlink and free the
 * per-type bookkeeping node, then drop the flow's entry from the
 * global flow-memory list and free the rte_flow handle.
 *
 * @param dev   port the flow was programmed on
 * @param flow  handle returned by ixgbe_flow_create()
 * @param error rte_flow error reporting structure
 *
 * NOTE(review): "ð..." below looks like HTML-entity mangling of
 * "&eth..." (i.e. "&ethertype_filter...") — confirm against the
 * original. Some lines are elided in this excerpt.
 */
2904 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2905 struct rte_flow *flow,
2906 struct rte_flow_error *error)
2909 struct rte_flow *pmd_flow = flow;
2910 enum rte_filter_type filter_type = pmd_flow->filter_type;
2911 struct rte_eth_ntuple_filter ntuple_filter;
2912 struct rte_eth_ethertype_filter ethertype_filter;
2913 struct rte_eth_syn_filter syn_filter;
2914 struct ixgbe_fdir_rule fdir_rule;
2915 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2916 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2917 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2918 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2919 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2920 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2921 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2922 struct ixgbe_hw_fdir_info *fdir_info =
2923 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2925 switch (filter_type) {
2926 case RTE_ETH_FILTER_NTUPLE:
2927 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
/* Copy the stored filter info and remove it from HW (FALSE = delete). */
2929 (void)rte_memcpy(&ntuple_filter,
2930 &ntuple_filter_ptr->filter_info,
2931 sizeof(struct rte_eth_ntuple_filter));
2932 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2934 TAILQ_REMOVE(&filter_ntuple_list,
2935 ntuple_filter_ptr, entries);
2936 rte_free(ntuple_filter_ptr);
2939 case RTE_ETH_FILTER_ETHERTYPE:
2940 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2942 (void)rte_memcpy(ðertype_filter,
2943 ðertype_filter_ptr->filter_info,
2944 sizeof(struct rte_eth_ethertype_filter));
2945 ret = ixgbe_add_del_ethertype_filter(dev,
2946 ðertype_filter, FALSE);
2948 TAILQ_REMOVE(&filter_ethertype_list,
2949 ethertype_filter_ptr, entries);
2950 rte_free(ethertype_filter_ptr);
2953 case RTE_ETH_FILTER_SYN:
2954 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2956 (void)rte_memcpy(&syn_filter,
2957 &syn_filter_ptr->filter_info,
2958 sizeof(struct rte_eth_syn_filter));
2959 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2961 TAILQ_REMOVE(&filter_syn_list,
2962 syn_filter_ptr, entries);
2963 rte_free(syn_filter_ptr);
2966 case RTE_ETH_FILTER_FDIR:
2967 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2968 (void)rte_memcpy(&fdir_rule,
2969 &fdir_rule_ptr->filter_info,
2970 sizeof(struct ixgbe_fdir_rule));
2971 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2973 TAILQ_REMOVE(&filter_fdir_list,
2974 fdir_rule_ptr, entries);
2975 rte_free(fdir_rule_ptr);
/* When the last fdir rule is gone, allow a new global mask. */
2976 if (TAILQ_EMPTY(&filter_fdir_list))
2977 fdir_info->mask_added = false;
2980 case RTE_ETH_FILTER_L2_TUNNEL:
2981 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2983 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2984 sizeof(struct rte_eth_l2_tunnel_conf));
2985 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2987 TAILQ_REMOVE(&filter_l2_tunnel_list,
2988 l2_tn_filter_ptr, entries);
2989 rte_free(l2_tn_filter_ptr);
2993 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
/* Report destruction failure through the rte_flow error channel. */
3000 rte_flow_error_set(error, EINVAL,
3001 RTE_FLOW_ERROR_TYPE_HANDLE,
3002 NULL, "Failed to destroy flow");
/* Finally, drop the flow from the global list and free the handle. */
3006 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3007 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3008 TAILQ_REMOVE(&ixgbe_flow_list,
3009 ixgbe_flow_mem_ptr, entries);
3010 rte_free(ixgbe_flow_mem_ptr);
3018 /* Destroy all flow rules associated with a port on ixgbe. */
/*
 * Remove every flow rule on the port: clear ntuple, ethertype and SYN
 * filters from hardware, then fdir and L2 tunnel filters (reporting a
 * failure via @error if either clear returns nonzero), and finally
 * flush the software bookkeeping lists.
 *
 * @param dev   port to flush
 * @param error rte_flow error reporting structure
 */
3020 ixgbe_flow_flush(struct rte_eth_dev *dev,
3021 struct rte_flow_error *error)
3025 ixgbe_clear_all_ntuple_filter(dev);
3026 ixgbe_clear_all_ethertype_filter(dev);
3027 ixgbe_clear_syn_filter(dev);
3029 ret = ixgbe_clear_all_fdir_filter(dev);
3031 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3032 NULL, "Failed to flush rule");
3036 ret = ixgbe_clear_all_l2_tn_filter(dev);
3038 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3039 NULL, "Failed to flush rule");
/* Hardware is clean; drop all software bookkeeping. */
3043 ixgbe_filterlist_flush();
3048 const struct rte_flow_ops ixgbe_flow_ops = {
3049 .validate = ixgbe_flow_validate,
3050 .create = ixgbe_flow_create,
3051 .destroy = ixgbe_flow_destroy,
3052 .flush = ixgbe_flow_flush,