/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/**
 * An endless loop cannot happen given the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network order.
 */
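/*
 * Illustrative sketch only (not part of the driver): the byte-order split
 * above, seen from the application side. The variable names below are
 * hypothetical.
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = rte_cpu_to_be_16(80) },	-- network order
 *	};
 *	struct rte_flow_attr attr = {
 *		.ingress = 1,
 *		.priority = 1,					-- CPU order
 *	};
 */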
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
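
/*
 * Illustrative sketch only (not part of the driver): a pattern/action list
 * that cons_parse_ntuple_filter() accepts, matching the doc comment above.
 * All names are hypothetical application-side variables.
 *
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(0xC0A80114), -- 192.168.1.20
 *			.dst_addr = rte_cpu_to_be_32(0xC0A70332), -- 192.167.3.50
 *			.next_proto_id = 17,                      -- UDP
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */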
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

	/* ixgbe doesn't support TCP flags */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* ixgbe doesn't support many priorities */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}
	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
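
/*
 * Illustrative sketch only (not part of the driver): an ethertype rule
 * matching EtherType 0x0807 with a fully-masked type field, steering to
 * queue 0. Names are hypothetical application-side variables.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = 0xFFFF,		-- full mask, endian-symmetric
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */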
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* ixgbe doesn't support MAC address */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* Parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
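
/*
 * Illustrative sketch only (not part of the driver): the only two
 * priority values the SYN filter accepts from the application, and the
 * hig_pri value each one maps to in the parsed filter.
 *
 *	attr.priority = 0;           -- filter->hig_pri = 0 (lowest)
 *	attr.priority = UINT32_MAX;  -- filter->hig_pri = 1 (highest)
 */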
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}
	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
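
/*
 * Illustrative sketch only (not part of the driver): an E-tag item this
 * parser accepts, under the assumption (from the checks above) that GRP
 * and E-CID base share the big-endian rsvd_grp_ecid_b field, GRP in the
 * top 2 of the low 14 bits. Names are hypothetical.
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 */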
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}
	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
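
/*
 * Illustrative sketch only (not part of the driver): the action list
 * shapes this helper accepts. The optional MARK id ends up in
 * rule->soft_id. Names are hypothetical application-side variables.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */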
/* search next non-void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
		return next;
	next = next_no_void_pattern(pattern, next);
	return next;
}
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
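
/*
 * Illustrative sketch only (not part of the driver): a FUZZY item that
 * makes signature_match() return 1, which selects signature mode in the
 * flow director parser below. Names are hypothetical.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xFFFFFFFF };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */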
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}
	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must be signature match;
		 * 2. does not support last;
		 * 3. mask must not be null.
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* only the x550 family supports the sctp port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other devices, even the sctp port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
			    (sctp_mask->hdr.src_port ||
			     sctp_mask->hdr.dst_port ||
			     sctp_mask->hdr.tag ||
			     sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
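
/*
 * Illustrative sketch only (not part of the driver): a RAW item that the
 * flexbyte checks above accept, matching the 2-byte pattern 0x86DD at even
 * offset 12. Assumes the flexible-array layout of rte_flow_item_raw in
 * this DPDK revision (pattern bytes stored right after the struct); names
 * are hypothetical.
 *
 *	uint8_t spec_buf[sizeof(struct rte_flow_item_raw) + 2];
 *	uint8_t mask_buf[sizeof(struct rte_flow_item_raw) + 2];
 *	struct rte_flow_item_raw *raw_spec = (void *)spec_buf;
 *	struct rte_flow_item_raw *raw_mask = (void *)mask_buf;
 *
 *	memset(spec_buf, 0, sizeof(spec_buf));
 *	raw_spec->offset = 12;			-- even, <= 62
 *	raw_spec->length = 2;
 *	raw_spec->pattern[0] = 0x86;
 *	raw_spec->pattern[1] = 0xDD;
 *
 *	memset(mask_buf, 0, sizeof(mask_buf));
 *	raw_mask->relative = 1;
 *	raw_mask->search = 1;
 *	raw_mask->offset = 0xFFFFFFFF;
 *	raw_mask->limit = 0xFFFF;
 *	raw_mask->length = 0xFFFF;
 *	raw_mask->pattern[0] = 0xFF;
 *	raw_mask->pattern[1] = 0xFF;
 */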

#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not handle the omitted fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC, IPv4, IPv6, UDP, VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;

	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}

	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bit field */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0,
				       sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}

	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no VLAN item is given, use the full TCI mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}

	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN item is absent, the TCI is treated as don't care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
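
/*
 * Illustrative sketch, not compiled into the driver: a VxLAN pattern of
 * the shape the tunnel parser above accepts (outer ETH/IPV4/UDP given
 * spec-less, VNI fully masked, inner dst MAC per-byte masked, VLAN TCI
 * masked with 0xEFFF). The VNI, MAC, TCI and queue values are
 * hypothetical, as is the helper itself.
 */
#if 0
static struct rte_flow *
example_create_vxlan_fdir_flow(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x32, 0x54 },
	};
	struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },	/* VNI totally masked */
	};
	struct rte_flow_item_eth inner_eth_spec = {
		.dst.addr_bytes = { 0xA0, 0x36, 0x9F, 0x00, 0x00, 0x01 },
	};
	struct rte_flow_item_eth inner_eth_mask = {
		/* dst MAC is a per-byte mask; src and type must stay 0 */
		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	};
	struct rte_flow_item_vlan vlan_spec = {
		.tci = rte_cpu_to_be_16(0x2016),
	};
	struct rte_flow_item_vlan vlan_mask = {
		.tci = rte_cpu_to_be_16(0xEFFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* stack only */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,	/* inner MAC */
		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
#endif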

static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);
	if (ret)
		return ret;

step_next:

	if (hw->mac.type == ixgbe_mac_82599EB &&
	    rule->fdirflags == IXGBE_FDIRCMD_DROP &&
	    (rule->ixgbe_fdir.formatted.src_port != 0 ||
	     rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}

void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			     ixgbe_flow_mem_ptr, entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the filter type it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
			  ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			(void)rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			(void)rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			(void)rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				(void)rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter,
						     FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele),
				0);
			(void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		     ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
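
/*
 * Illustrative sketch, not compiled into the driver: the parsers above run
 * in a fixed order (ntuple, ethertype, SYN, fdir, L2 tunnel) and the first
 * one that accepts the rule wins, so a plain TCP 5-tuple such as this one
 * becomes an ntuple filter even though fdir could express it too. The
 * addresses, ports, priority and queue are hypothetical.
 */
#if 0
static struct rte_flow *
example_create_tcp_5tuple_flow(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80001), /* 192.168.0.1 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A80002), /* 192.168.0.2 */
		.hdr.next_proto_id = IPPROTO_TCP,
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = rte_cpu_to_be_32(UINT32_MAX),
		.hdr.dst_addr = rte_cpu_to_be_32(UINT32_MAX),
		.hdr.next_proto_id = UINT8_MAX,
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(1024),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.src_port = rte_cpu_to_be_16(UINT16_MAX),
		.hdr.dst_port = rte_cpu_to_be_16(UINT16_MAX),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
#endif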

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
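
/*
 * Illustrative sketch, not compiled into the driver: because validation
 * only checks the format, an application should still expect create to
 * fail at programming time (e.g. when the HW tables are full). The helper
 * and its parameters are hypothetical.
 */
#if 0
static struct rte_flow *
example_validate_then_create(uint16_t port_id,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;	/* format not supported by this port */

	/* Can still return NULL even though validation passed. */
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif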

/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		   struct rte_flow *flow,
		   struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				     ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				     ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				     syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		(void)rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				     fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		(void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				     l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				     ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		 struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
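
/*
 * Illustrative sketch, not compiled into the driver: the rte_flow layer
 * obtains this table through the generic filter-ctrl callback; roughly,
 * the driver's filter_ctrl implementation answers a RTE_ETH_FILTER_GENERIC
 * / RTE_ETH_FILTER_GET query as below. The helper name is hypothetical.
 */
#if 0
static int
example_get_flow_ops(enum rte_filter_op filter_op, void *arg)
{
	if (filter_op != RTE_ETH_FILTER_GET)
		return -EINVAL;
	*(const void **)arg = &ixgbe_flow_ops;
	return 0;
}
#endif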