/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/**
 * Endless loop will never happen with below assumption
 * 1. there is at least one no-void item(END)
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there's an assumption for all the parsers.
 * rte_flow_item is using big endian, rte_flow_attr and
 * rte_flow_action are using CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
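
/*
 * Editor's note: a minimal sketch (not part of the driver) of what the
 * byte-order convention above means for a caller filling in a rule.
 * The IXGBE_FLOW_EXAMPLES guard is hypothetical and never defined, so
 * this and the later example blocks are documentation only.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static void
example_fill_ipv4_spec(struct rte_flow_item_ipv4 *spec,
		       struct rte_flow_attr *attr)
{
	/* Item contents describe packets, so they are big endian. */
	spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	spec->hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
	/* Attributes, in contrast, are plain CPU-order integers. */
	attr->priority = 1;
}
#endif /* IXGBE_FLOW_EXAMPLES */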
/**
 * Parse the rule to see if it is a n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for ixgbe because the flags is specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
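
/*
 * Editor's sketch (hypothetical, compiled out): an rte_flow rule matching
 * the pattern the n-tuple parser above accepts -- ETH (empty) / IPV4
 * (addresses + next_proto_id) / UDP (ports) / END, one QUEUE action.
 * Function and parameter names are illustrative, not upstream API.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static struct rte_flow *
example_create_ntuple_rule(uint8_t port_id, uint16_t queue_idx,
			   struct rte_flow_error *error)
{
	/* priority must stay within IXGBE_MIN/MAX_N_TUPLE_PRIO */
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
			.next_proto_id = IPPROTO_UDP,
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
			.next_proto_id = UINT8_MAX,
		},
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_EXAMPLES */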
/**
 * Parse the rule to see if it is a ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
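
/*
 * Editor's sketch (hypothetical, compiled out): a rule accepted by the
 * ethertype parser above -- a single ETH item with a fully masked
 * EtherType, no MAC matching, and a QUEUE action (DROP is rejected).
 */
#ifdef IXGBE_FLOW_EXAMPLES
static struct rte_flow *
example_create_ethertype_rule(uint8_t port_id, uint16_t queue_idx,
			      struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 }; /* priority must be 0 */
	struct rte_flow_item_eth eth_spec = {
		/* example EtherType from the doc comment; big endian */
		.type = rte_cpu_to_be_16(0x0807),
	};
	struct rte_flow_item_eth eth_mask = {
		/* src/dst MAC masks stay all-zero so MAC is ignored */
		.type = 0xFFFF,
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_EXAMPLES */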
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
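
/*
 * Editor's sketch (hypothetical, compiled out): a TCP SYN rule matching
 * the doc comment above -- empty ETH and IPV4 items, a TCP item matching
 * only the SYN flag, and a QUEUE action. attr.priority 0 selects the low
 * priority; (uint32_t)~0U would select the high one.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static struct rte_flow *
example_create_syn_rule(uint8_t port_id, uint16_t queue_idx,
			struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = { .tcp_flags = 0x02 },	/* TCP_SYN_FLAG */
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = { .tcp_flags = 0x02 },	/* must equal TCP_SYN_FLAG */
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_EXAMPLES */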
/**
 * Parse the rule to see if it is a L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}
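
/*
 * Editor's sketch (hypothetical, compiled out): an E-tag rule matching the
 * doc comment above -- grp 0x1 and e_cid_base 0x309 packed into the 14-bit
 * rsvd_grp_ecid_b field, with the full 14-bit mask 0x3FFF. The QUEUE index
 * lands in filter->pool. Field layout follows rte_flow.h of this era.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static struct rte_flow *
example_create_etag_rule(uint8_t port_id, uint16_t pool_idx,
			 struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = pool_idx };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_EXAMPLES */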
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
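
/*
 * Editor's sketch (hypothetical, compiled out): the action layout the
 * helper above accepts -- QUEUE (or DROP in perfect mode), then an
 * optional MARK whose id becomes rule->soft_id, then END.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static const struct rte_flow_action_queue example_fdir_queue = { .index = 0 };
static const struct rte_flow_action_mark example_fdir_mark = { .id = 0x1234 };
static const struct rte_flow_action example_fdir_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_fdir_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_fdir_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* IXGBE_FLOW_EXAMPLES */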
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			/* a zero or inverted threshold range means no match */
			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
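
/*
 * Editor's sketch (hypothetical, compiled out): a FUZZY item that makes
 * signature_match() return 1, i.e. requests signature mode. It may be
 * placed anywhere in the pattern before END.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static const struct rte_flow_item_fuzzy example_fuzzy_spec = { .thresh = 1 };
static const struct rte_flow_item_fuzzy example_fuzzy_mask = {
	.thresh = UINT32_MAX,
};
static const struct rte_flow_item example_fuzzy_item = {
	.type = RTE_FLOW_ITEM_TYPE_FUZZY,
	.spec = &example_fuzzy_spec,
	.mask = &example_fuzzy_mask,
};
#endif /* IXGBE_FLOW_EXAMPLES */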
/**
 * Parse the rule to see if it is a IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional)
 * The next not void item could be RAW (for flexbyte, optional)
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					       sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tags are not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* only care about src & dst ports, mask must be given */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		sctp_mask =
			(const struct rte_flow_item_sctp *)item->mask;
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = sctp_mask->hdr.src_port;
		rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				sctp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				sctp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}

	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
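
/*
 * Editor's sketch (hypothetical, compiled out): a perfect-mode flow
 * director rule the parser above accepts -- ETH (empty) / IPV4 with fully
 * masked addresses / TCP with fully masked ports / END, plus QUEUE and
 * MARK actions. The next_proto_id and tcp_flags masks stay zero, as the
 * parser requires.
 */
#ifdef IXGBE_FLOW_EXAMPLES
static struct rte_flow *
example_create_fdir_perfect_rule(uint8_t port_id, uint16_t queue_idx,
				 struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 }; /* priority must be 0 */
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = {
			.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
			.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		},
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = {
			.src_addr = UINT32_MAX,
			.dst_addr = UINT32_MAX,
		},
	};
	struct rte_flow_item_tcp tcp_spec = {
		.hdr = {
			.src_port = rte_cpu_to_be_16(80),
			.dst_port = rte_cpu_to_be_16(80),
		},
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr = {
			.src_port = UINT16_MAX,
			.dst_port = UINT16_MAX,
		},
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = queue_idx };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif /* IXGBE_FLOW_EXAMPLES */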
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
2076 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2077 const struct rte_flow_item pattern[],
2078 const struct rte_flow_action actions[],
2079 struct ixgbe_fdir_rule *rule,
2080 struct rte_flow_error *error)
2082 const struct rte_flow_item *item;
2083 const struct rte_flow_item_vxlan *vxlan_spec;
2084 const struct rte_flow_item_vxlan *vxlan_mask;
2085 const struct rte_flow_item_nvgre *nvgre_spec;
2086 const struct rte_flow_item_nvgre *nvgre_mask;
2087 const struct rte_flow_item_eth *eth_spec;
2088 const struct rte_flow_item_eth *eth_mask;
2089 const struct rte_flow_item_vlan *vlan_spec;
2090 const struct rte_flow_item_vlan *vlan_mask;
2094 rte_flow_error_set(error, EINVAL,
2095 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2096 NULL, "NULL pattern.");
2101 rte_flow_error_set(error, EINVAL,
2102 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2103 NULL, "NULL action.");
2108 rte_flow_error_set(error, EINVAL,
2109 RTE_FLOW_ERROR_TYPE_ATTR,
2110 NULL, "NULL attribute.");
2115 * Some fields may not be provided. Set spec to 0 and mask to default
2116 * value. So, we need not do anything for the not provided fields later.
2118 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2119 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2120 rule->mask.vlan_tci_mask = 0;
2123 * The first not void item should be
2124 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2126 item = next_no_void_pattern(pattern, NULL);
2127 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2128 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2129 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2130 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2131 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2132 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2133 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2134 rte_flow_error_set(error, EINVAL,
2135 RTE_FLOW_ERROR_TYPE_ITEM,
2136 item, "Not supported by fdir filter");
2140 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2143 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2144 /* Only used to describe the protocol stack. */
2145 if (item->spec || item->mask) {
2146 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2147 rte_flow_error_set(error, EINVAL,
2148 RTE_FLOW_ERROR_TYPE_ITEM,
2149 item, "Not supported by fdir filter");
2152 /* Not supported last point for range */
2154 rte_flow_error_set(error, EINVAL,
2155 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2156 item, "Not supported last point for range");
2160 /* Check if the next not void item is IPv4 or IPv6. */
2161 item = next_no_void_pattern(pattern, item);
2162 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2163 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2164 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2165 rte_flow_error_set(error, EINVAL,
2166 RTE_FLOW_ERROR_TYPE_ITEM,
2167 item, "Not supported by fdir filter");
2173 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2174 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2175 /* Only used to describe the protocol stack. */
2176 if (item->spec || item->mask) {
2177 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178 rte_flow_error_set(error, EINVAL,
2179 RTE_FLOW_ERROR_TYPE_ITEM,
2180 item, "Not supported by fdir filter");
2183 /* Not supported last point for range */
2185 rte_flow_error_set(error, EINVAL,
2186 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2187 item, "Not supported last point for range");
2191 /* Check if the next not void item is UDP or NVGRE. */
2192 item = next_no_void_pattern(pattern, item);
2193 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2194 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2195 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2196 rte_flow_error_set(error, EINVAL,
2197 RTE_FLOW_ERROR_TYPE_ITEM,
2198 item, "Not supported by fdir filter");
2204 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2205 /* Only used to describe the protocol stack. */
2206 if (item->spec || item->mask) {
2207 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2208 rte_flow_error_set(error, EINVAL,
2209 RTE_FLOW_ERROR_TYPE_ITEM,
2210 item, "Not supported by fdir filter");
2213 /* Not supported last point for range */
2215 rte_flow_error_set(error, EINVAL,
2216 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2217 item, "Not supported last point for range");
2221 /* Check if the next not void item is VxLAN. */
2222 item = next_no_void_pattern(pattern, item);
2223 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2224 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2225 rte_flow_error_set(error, EINVAL,
2226 RTE_FLOW_ERROR_TYPE_ITEM,
2227 item, "Not supported by fdir filter");
2232 /* Get the VxLAN info */
2233 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2234 rule->ixgbe_fdir.formatted.tunnel_type =
2235 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2237 /* Only care about the VNI; other fields should be masked. */
2239 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2240 rte_flow_error_set(error, EINVAL,
2241 RTE_FLOW_ERROR_TYPE_ITEM,
2242 item, "Not supported by fdir filter");
2245 /* Not supported last point for range */
2247 rte_flow_error_set(error, EINVAL,
2248 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2249 item, "Not supported last point for range");
2252 rule->b_mask = TRUE;
2254 /* Tunnel type is always meaningful. */
2255 rule->mask.tunnel_type_mask = 1;
2258 (const struct rte_flow_item_vxlan *)item->mask;
2259 if (vxlan_mask->flags) {
2260 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261 rte_flow_error_set(error, EINVAL,
2262 RTE_FLOW_ERROR_TYPE_ITEM,
2263 item, "Not supported by fdir filter");
2266 /* VNI must be totally masked or not. */
2267 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2268 vxlan_mask->vni[2]) &&
2269 ((vxlan_mask->vni[0] != 0xFF) ||
2270 (vxlan_mask->vni[1] != 0xFF) ||
2271 (vxlan_mask->vni[2] != 0xFF))) {
2272 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2273 rte_flow_error_set(error, EINVAL,
2274 RTE_FLOW_ERROR_TYPE_ITEM,
2275 item, "Not supported by fdir filter");
2279 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2280 RTE_DIM(vxlan_mask->vni));
2283 rule->b_spec = TRUE;
2284 vxlan_spec = (const struct rte_flow_item_vxlan *)
2286 rte_memcpy(((uint8_t *)
2287 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2288 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2289 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2290 rule->ixgbe_fdir.formatted.tni_vni);
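		/*
		 * Worked example: vni = {0x00, 0x32, 0x54} is copied into
		 * bytes 1..3 of the big-endian tni_vni field, so after the
		 * rte_be_to_cpu_32() above tni_vni holds 0x003254 in CPU
		 * order.
		 */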
2294 /* Get the NVGRE info */
2295 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2296 rule->ixgbe_fdir.formatted.tunnel_type =
2297 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2300 * Only care about flags0, flags1, protocol and TNI;
2301 * other fields should be masked.
2304 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2305 rte_flow_error_set(error, EINVAL,
2306 RTE_FLOW_ERROR_TYPE_ITEM,
2307 item, "Not supported by fdir filter");
2310 /* Not supported last point for range */
2312 rte_flow_error_set(error, EINVAL,
2313 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2314 item, "Not supported last point for range");
2317 rule->b_mask = TRUE;
2319 /* Tunnel type is always meaningful. */
2320 rule->mask.tunnel_type_mask = 1;
2323 (const struct rte_flow_item_nvgre *)item->mask;
2324 if (nvgre_mask->flow_id) {
2325 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2326 rte_flow_error_set(error, EINVAL,
2327 RTE_FLOW_ERROR_TYPE_ITEM,
2328 item, "Not supported by fdir filter");
2331 if (nvgre_mask->c_k_s_rsvd0_ver !=
2332 rte_cpu_to_be_16(0x3000) ||
2333 nvgre_mask->protocol != 0xFFFF) {
2334 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2335 rte_flow_error_set(error, EINVAL,
2336 RTE_FLOW_ERROR_TYPE_ITEM,
2337 item, "Not supported by fdir filter");
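		/*
		 * Note: 0x3000 covers exactly the K and S bits of the first
		 * GRE header word; the spec check below then requires K = 1
		 * and S = 0 (a key/TNI present, no sequence number), which
		 * is what NVGRE mandates.
		 */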
2340 /* TNI must be totally masked or not. */
2341 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] || nvgre_mask->tni[2]) &&
2342 ((nvgre_mask->tni[0] != 0xFF) ||
2343 (nvgre_mask->tni[1] != 0xFF) ||
2344 (nvgre_mask->tni[2] != 0xFF))) {
2345 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2346 rte_flow_error_set(error, EINVAL,
2347 RTE_FLOW_ERROR_TYPE_ITEM,
2348 item, "Not supported by fdir filter");
2351 /* TNI is a 24-bit field */
2352 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2353 RTE_DIM(nvgre_mask->tni));
2354 rule->mask.tunnel_id_mask <<= 8;
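		/*
		 * The same <<= 8 is applied to the TNI spec bytes below, so
		 * the 24-bit mask and the 24-bit value stay aligned within
		 * the 32-bit tunnel ID field.
		 */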
2357 rule->b_spec = TRUE;
2359 (const struct rte_flow_item_nvgre *)item->spec;
2360 if (nvgre_spec->c_k_s_rsvd0_ver !=
2361 rte_cpu_to_be_16(0x2000) ||
2362 nvgre_spec->protocol !=
2363 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2364 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2365 rte_flow_error_set(error, EINVAL,
2366 RTE_FLOW_ERROR_TYPE_ITEM,
2367 item, "Not supported by fdir filter");
2370 /* TNI is a 24-bit field */
2371 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2372 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2373 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2377 /* Check if the next not void item is MAC. */
2378 item = next_no_void_pattern(pattern, item);
2379 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2380 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2381 rte_flow_error_set(error, EINVAL,
2382 RTE_FLOW_ERROR_TYPE_ITEM,
2383 item, "Not supported by fdir filter");
2388 * Only VLAN and dst MAC address are supported;
2389 * other fields should be masked.
2393 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2394 rte_flow_error_set(error, EINVAL,
2395 RTE_FLOW_ERROR_TYPE_ITEM,
2396 item, "Not supported by fdir filter");
2399 /* Not supported last point for range */
2401 rte_flow_error_set(error, EINVAL,
2402 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2403 item, "Not supported last point for range");
2406 rule->b_mask = TRUE;
2407 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2409 /* Ether type should be masked. */
2410 if (eth_mask->type) {
2411 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2412 rte_flow_error_set(error, EINVAL,
2413 RTE_FLOW_ERROR_TYPE_ITEM,
2414 item, "Not supported by fdir filter");
2418 /* src MAC address should be masked. */
2419 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2420 if (eth_mask->src.addr_bytes[j]) {
2422 sizeof(struct ixgbe_fdir_rule));
2423 rte_flow_error_set(error, EINVAL,
2424 RTE_FLOW_ERROR_TYPE_ITEM,
2425 item, "Not supported by fdir filter");
2429 rule->mask.mac_addr_byte_mask = 0;
2430 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2431 /* It's a per-byte mask. */
2432 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2433 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2434 } else if (eth_mask->dst.addr_bytes[j]) {
2435 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2436 rte_flow_error_set(error, EINVAL,
2437 RTE_FLOW_ERROR_TYPE_ITEM,
2438 item, "Not supported by fdir filter");
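	/*
	 * Example: a dst MAC mask of ff:ff:ff:ff:ff:ff sets
	 * mac_addr_byte_mask to 0x3F, one bit per fully-masked byte;
	 * partially-masked bytes are rejected above.
	 */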
2443 /* When there is no VLAN, treat the TCI as fully masked. */
2444 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2447 rule->b_spec = TRUE;
2448 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2450 /* Get the dst MAC. */
2451 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2452 rule->ixgbe_fdir.formatted.inner_mac[j] =
2453 eth_spec->dst.addr_bytes[j];
2458 * Check if the next not void item is VLAN or IPv4.
2459 * IPv6 is not supported.
2461 item = next_no_void_pattern(pattern, item);
2462 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2463 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2464 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2465 rte_flow_error_set(error, EINVAL,
2466 RTE_FLOW_ERROR_TYPE_ITEM,
2467 item, "Not supported by fdir filter");
2470 /* Not supported last point for range */
2472 rte_flow_error_set(error, EINVAL,
2473 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2474 item, "Not supported last point for range");
2478 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2479 if (!(item->spec && item->mask)) {
2480 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2481 rte_flow_error_set(error, EINVAL,
2482 RTE_FLOW_ERROR_TYPE_ITEM,
2483 item, "Not supported by fdir filter");
2487 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2488 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2490 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2492 rule->mask.vlan_tci_mask = vlan_mask->tci;
2493 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
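		/*
		 * 0xEFFF keeps the PCP and VLAN ID bits of the TCI and drops
		 * the DEI/CFI bit, matching the 0xEFFF mask shown in the
		 * pattern examples above.
		 */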
2494 /* More than one VLAN tag is not supported. */
2496 /* Check if the next not void item is END. */
2497 item = next_no_void_pattern(pattern, item);
2499 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2500 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2501 rte_flow_error_set(error, EINVAL,
2502 RTE_FLOW_ERROR_TYPE_ITEM,
2503 item, "Not supported by fdir filter");
2509 * If the tag is 0, the VLAN is a don't-care; do nothing.
2513 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2517 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2518 const struct rte_flow_attr *attr,
2519 const struct rte_flow_item pattern[],
2520 const struct rte_flow_action actions[],
2521 struct ixgbe_fdir_rule *rule,
2522 struct rte_flow_error *error)
2525 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2526 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2528 if (hw->mac.type != ixgbe_mac_82599EB &&
2529 hw->mac.type != ixgbe_mac_X540 &&
2530 hw->mac.type != ixgbe_mac_X550 &&
2531 hw->mac.type != ixgbe_mac_X550EM_x &&
2532 hw->mac.type != ixgbe_mac_X550EM_a)
2535 ret = ixgbe_parse_fdir_filter_normal(attr, pattern,
2536 actions, rule, error);
2541 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2542 actions, rule, error);
2549 if (hw->mac.type == ixgbe_mac_82599EB &&
2550 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2551 (rule->mask.src_port_mask != 0 ||
2552 rule->mask.dst_port_mask != 0))
2555 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2556 fdir_mode != rule->mode)
2559 if (rule->queue >= dev->data->nb_rx_queues)
2566 ixgbe_filterlist_flush(void)
2568 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2569 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2570 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2571 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2572 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2573 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2575 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2576 TAILQ_REMOVE(&filter_ntuple_list,
2579 rte_free(ntuple_filter_ptr);
2582 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2583 TAILQ_REMOVE(&filter_ethertype_list,
2584 ethertype_filter_ptr,
2586 rte_free(ethertype_filter_ptr);
2589 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2590 TAILQ_REMOVE(&filter_syn_list,
2593 rte_free(syn_filter_ptr);
2596 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2597 TAILQ_REMOVE(&filter_l2_tunnel_list,
2600 rte_free(l2_tn_filter_ptr);
2603 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2604 TAILQ_REMOVE(&filter_fdir_list,
2607 rte_free(fdir_rule_ptr);
2610 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2611 TAILQ_REMOVE(&ixgbe_flow_list,
2614 rte_free(ixgbe_flow_mem_ptr->flow);
2615 rte_free(ixgbe_flow_mem_ptr);
2620 * Create or destroy a flow rule.
2621 * Theoretically one rule can match more than one filter type.
2622 * We let it use the first filter type it hits.
2623 * So the sequence matters.
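/*
 * Concretely, the parsers below are tried in a fixed order:
 * n-tuple, ethertype, SYN, flow director, then L2 tunnel; the first
 * parser that accepts the rule determines the filter type used.
 */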
2625 static struct rte_flow *
2626 ixgbe_flow_create(struct rte_eth_dev *dev,
2627 const struct rte_flow_attr *attr,
2628 const struct rte_flow_item pattern[],
2629 const struct rte_flow_action actions[],
2630 struct rte_flow_error *error)
2633 struct rte_eth_ntuple_filter ntuple_filter;
2634 struct rte_eth_ethertype_filter ethertype_filter;
2635 struct rte_eth_syn_filter syn_filter;
2636 struct ixgbe_fdir_rule fdir_rule;
2637 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2638 struct ixgbe_hw_fdir_info *fdir_info =
2639 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2640 struct rte_flow *flow = NULL;
2641 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2642 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2643 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2644 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2645 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2646 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2648 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2650 PMD_DRV_LOG(ERR, "failed to allocate memory");
2651 return NULL;
2653 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2654 sizeof(struct ixgbe_flow_mem), 0);
2655 if (!ixgbe_flow_mem_ptr) {
2656 PMD_DRV_LOG(ERR, "failed to allocate memory");
2660 ixgbe_flow_mem_ptr->flow = flow;
2661 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2662 ixgbe_flow_mem_ptr, entries);
2664 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2665 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2666 actions, &ntuple_filter, error);
2668 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2670 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2671 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2672 (void)rte_memcpy(&ntuple_filter_ptr->filter_info,
2674 sizeof(struct rte_eth_ntuple_filter));
2675 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2676 ntuple_filter_ptr, entries);
2677 flow->rule = ntuple_filter_ptr;
2678 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2684 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2685 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2686 actions, ðertype_filter, error);
2688 ret = ixgbe_add_del_ethertype_filter(dev,
2689 ðertype_filter, TRUE);
2691 ethertype_filter_ptr = rte_zmalloc(
2692 "ixgbe_ethertype_filter",
2693 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2694 (void)rte_memcpy(ðertype_filter_ptr->filter_info,
2696 sizeof(struct rte_eth_ethertype_filter));
2697 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2698 ethertype_filter_ptr, entries);
2699 flow->rule = ethertype_filter_ptr;
2700 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2706 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2707 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2708 actions, &syn_filter, error);
2710 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2712 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2713 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2714 (void)rte_memcpy(&syn_filter_ptr->filter_info,
2716 sizeof(struct rte_eth_syn_filter));
2717 TAILQ_INSERT_TAIL(&filter_syn_list,
2720 flow->rule = syn_filter_ptr;
2721 flow->filter_type = RTE_ETH_FILTER_SYN;
2727 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2728 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2729 actions, &fdir_rule, error);
2731 /* A mask cannot be deleted: once programmed, the global FDIR mask persists. */
2732 if (fdir_rule.b_mask) {
2733 if (!fdir_info->mask_added) {
2734 /* It's the first time the mask is set. */
2735 rte_memcpy(&fdir_info->mask,
2737 sizeof(struct ixgbe_hw_fdir_mask));
2738 fdir_info->flex_bytes_offset =
2739 fdir_rule.flex_bytes_offset;
2741 if (fdir_rule.mask.flex_bytes_mask)
2742 ixgbe_fdir_set_flexbytes_offset(dev,
2743 fdir_rule.flex_bytes_offset);
2745 ret = ixgbe_fdir_set_input_mask(dev);
2749 fdir_info->mask_added = TRUE;
2752 * Only one global mask is supported;
2753 * all the rules' masks should be the same.
2755 ret = memcmp(&fdir_info->mask,
2757 sizeof(struct ixgbe_hw_fdir_mask));
2761 if (fdir_info->flex_bytes_offset !=
2762 fdir_rule.flex_bytes_offset)
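			/*
			 * Consequently, once one FDIR rule is installed, any
			 * later rule whose mask or flex-bytes offset differs
			 * from the stored global one is rejected here rather
			 * than silently reprogramming the hardware.
			 */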
2767 if (fdir_rule.b_spec) {
2768 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2771 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2772 sizeof(struct ixgbe_fdir_rule_ele), 0);
2773 (void)rte_memcpy(&fdir_rule_ptr->filter_info,
2775 sizeof(struct ixgbe_fdir_rule));
2776 TAILQ_INSERT_TAIL(&filter_fdir_list,
2777 fdir_rule_ptr, entries);
2778 flow->rule = fdir_rule_ptr;
2779 flow->filter_type = RTE_ETH_FILTER_FDIR;
2791 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2792 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2793 actions, &l2_tn_filter, error);
2795 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2797 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2798 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2799 (void)rte_memcpy(&l2_tn_filter_ptr->filter_info,
2801 sizeof(struct rte_eth_l2_tunnel_conf));
2802 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2803 l2_tn_filter_ptr, entries);
2804 flow->rule = l2_tn_filter_ptr;
2805 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2811 TAILQ_REMOVE(&ixgbe_flow_list,
2812 ixgbe_flow_mem_ptr, entries);
2813 rte_flow_error_set(error, -ret,
2814 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2815 "Failed to create flow.");
2816 rte_free(ixgbe_flow_mem_ptr);
2822 * Check if the flow rule is supported by ixgbe.
2823 * It only checks the format; it does not guarantee that the rule can be
2824 * programmed into the HW, because there may not be enough room for it.
2827 ixgbe_flow_validate(struct rte_eth_dev *dev,
2828 const struct rte_flow_attr *attr,
2829 const struct rte_flow_item pattern[],
2830 const struct rte_flow_action actions[],
2831 struct rte_flow_error *error)
2833 struct rte_eth_ntuple_filter ntuple_filter;
2834 struct rte_eth_ethertype_filter ethertype_filter;
2835 struct rte_eth_syn_filter syn_filter;
2836 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2837 struct ixgbe_fdir_rule fdir_rule;
2840 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2841 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2842 actions, &ntuple_filter, error);
2846 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2847 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2848 actions, ðertype_filter, error);
2852 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2853 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2854 actions, &syn_filter, error);
2858 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2859 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2860 actions, &fdir_rule, error);
2864 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2865 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2866 actions, &l2_tn_filter, error);
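/*
 * Typical application-side usage of the two entry points above (an
 * illustrative sketch, not driver code; port_id, attr, pattern and actions
 * are the caller's own):
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (!flow)
 *		printf("flow rejected: %s\n", err.message ? err.message : "");
 */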
2871 /* Destroy a flow rule on ixgbe. */
2873 ixgbe_flow_destroy(struct rte_eth_dev *dev,
2874 struct rte_flow *flow,
2875 struct rte_flow_error *error)
2878 struct rte_flow *pmd_flow = flow;
2879 enum rte_filter_type filter_type = pmd_flow->filter_type;
2880 struct rte_eth_ntuple_filter ntuple_filter;
2881 struct rte_eth_ethertype_filter ethertype_filter;
2882 struct rte_eth_syn_filter syn_filter;
2883 struct ixgbe_fdir_rule fdir_rule;
2884 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2885 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2886 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2887 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2888 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2889 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2890 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2891 struct ixgbe_hw_fdir_info *fdir_info =
2892 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2894 switch (filter_type) {
2895 case RTE_ETH_FILTER_NTUPLE:
2896 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
2898 (void)rte_memcpy(&ntuple_filter,
2899 &ntuple_filter_ptr->filter_info,
2900 sizeof(struct rte_eth_ntuple_filter));
2901 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
2903 TAILQ_REMOVE(&filter_ntuple_list,
2904 ntuple_filter_ptr, entries);
2905 rte_free(ntuple_filter_ptr);
2908 case RTE_ETH_FILTER_ETHERTYPE:
2909 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
2911 (void)rte_memcpy(ðertype_filter,
2912 ðertype_filter_ptr->filter_info,
2913 sizeof(struct rte_eth_ethertype_filter));
2914 ret = ixgbe_add_del_ethertype_filter(dev,
2915 ðertype_filter, FALSE);
2917 TAILQ_REMOVE(&filter_ethertype_list,
2918 ethertype_filter_ptr, entries);
2919 rte_free(ethertype_filter_ptr);
2922 case RTE_ETH_FILTER_SYN:
2923 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
2925 (void)rte_memcpy(&syn_filter,
2926 &syn_filter_ptr->filter_info,
2927 sizeof(struct rte_eth_syn_filter));
2928 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
2930 TAILQ_REMOVE(&filter_syn_list,
2931 syn_filter_ptr, entries);
2932 rte_free(syn_filter_ptr);
2935 case RTE_ETH_FILTER_FDIR:
2936 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
2937 (void)rte_memcpy(&fdir_rule,
2938 &fdir_rule_ptr->filter_info,
2939 sizeof(struct ixgbe_fdir_rule));
2940 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
2942 TAILQ_REMOVE(&filter_fdir_list,
2943 fdir_rule_ptr, entries);
2944 rte_free(fdir_rule_ptr);
2945 if (TAILQ_EMPTY(&filter_fdir_list))
2946 fdir_info->mask_added = FALSE;
2949 case RTE_ETH_FILTER_L2_TUNNEL:
2950 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
2952 (void)rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
2953 sizeof(struct rte_eth_l2_tunnel_conf));
2954 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
2956 TAILQ_REMOVE(&filter_l2_tunnel_list,
2957 l2_tn_filter_ptr, entries);
2958 rte_free(l2_tn_filter_ptr);
2962 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2969 rte_flow_error_set(error, EINVAL,
2970 RTE_FLOW_ERROR_TYPE_HANDLE,
2971 NULL, "Failed to destroy flow");
2975 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
2976 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
2977 TAILQ_REMOVE(&ixgbe_flow_list,
2978 ixgbe_flow_mem_ptr, entries);
2979 rte_free(ixgbe_flow_mem_ptr);
2987 /* Destroy all flow rules associated with a port on ixgbe. */
2989 ixgbe_flow_flush(struct rte_eth_dev *dev,
2990 struct rte_flow_error *error)
2994 ixgbe_clear_all_ntuple_filter(dev);
2995 ixgbe_clear_all_ethertype_filter(dev);
2996 ixgbe_clear_syn_filter(dev);
2998 ret = ixgbe_clear_all_fdir_filter(dev);
3000 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3001 NULL, "Failed to flush rule");
3005 ret = ixgbe_clear_all_l2_tn_filter(dev);
3007 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3008 NULL, "Failed to flush rule");
3012 ixgbe_filterlist_flush();
3017 const struct rte_flow_ops ixgbe_flow_ops = {
3018 .validate = ixgbe_flow_validate,
3019 .create = ixgbe_flow_create,
3020 .destroy = ixgbe_flow_destroy,
3021 .flush = ixgbe_flow_flush,
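/*
 * For reference, this ops table reaches applications through the generic
 * filter control callback; a sketch of the hookup as done in
 * ixgbe_ethdev.c (paraphrased, not the literal code):
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &ixgbe_flow_ops;
 *		break;
 */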