/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
/**
 * An endless loop cannot happen given the assumptions below:
 * 1. there is at least one non-void item (END);
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order. This is because a pattern
 * describes packets, and packets normally use network order.
 */
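/**
 * For example (illustrative only): a TCP port written into an item spec
 * must be converted to big endian, while a queue index in an action
 * stays in CPU order:
 *
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);	(item: big endian)
 *	queue.index = 1;				(action: CPU order)
 */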
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
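/**
 * Illustrative sketch (not part of the driver; port id, queue index and
 * addresses are hypothetical): how an application could build a flow that
 * this parser accepts, matching the pattern example above.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = 17,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = 0xFFFFFFFF,
 *			.dst_addr = 0xFFFFFFFF,
 *			.next_proto_id = 0xFF,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .src_port = 0xFFFF, .dst_port = 0xFFFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */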
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
#ifdef RTE_LIBRTE_SECURITY
	/**
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
				item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* an ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
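/**
 * Illustrative sketch (hypothetical queue index, not part of the driver):
 * a flow the parser below accepts, matching ethertype 0x0807 and steering
 * it to queue 2. The ETH src/dst masks stay zeroed, as required.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = { .type = 0xFFFF };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */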
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
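/**
 * Illustrative sketch (hypothetical queue index, not part of the driver):
 * a SYN rule the parser below accepts. Only the SYN bit (0x02) is set in
 * both spec and mask.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = 0x02 },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = 0x02 },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */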
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
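/**
 * Illustrative sketch (hypothetical GRP, e_cid_base and VF id): an E-tag
 * rule the parser below accepts. rsvd_grp_ecid_b packs 2 reserved bits,
 * the 2-bit GRP and the 12-bit e_cid_base; the 0x3FFF mask covers GRP
 * plus e_cid_base, exactly what the mask check below requires.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */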
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				      actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void action is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void action is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
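/**
 * Illustrative action list (hypothetical queue index and mark id)
 * accepted by ixgbe_parse_fdir_act_attr() above: steer matching packets
 * to queue 3 and tag them with a software mark.
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */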
/* search next not void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec = (const struct rte_flow_item_fuzzy *)item->spec;
			last = (const struct rte_flow_item_fuzzy *)item->last;
			mask = (const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;
			lh = last ? last->thresh : sh;
			mh = mask->thresh;
			sh &= mh;
			lh &= mh;

			/* a non-zero masked threshold selects signature mode */
			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
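/**
 * Illustrative sketch (hypothetical thresholds): adding a FUZZY item with
 * a non-zero masked threshold anywhere before END selects signature mode,
 * per signature_match() above; without it the rule is parsed in perfect
 * mode. ip_spec/ip_mask are as in the n-tuple example earlier.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xFFFFFFFF };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		  .spec = &fuzzy_spec, .mask = &fuzzy_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */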
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;
	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}
	/* Get the VLAN info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not be null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* only the x550 family supports the sctp port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other devices, even the sctp port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
			    (sctp_mask->hdr.src_port ||
			     sctp_mask->hdr.dst_port ||
			     sctp_mask->hdr.tag ||
			     sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
2159 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2160 /* check if the next not void item is END */
2161 item = next_no_fuzzy_pattern(pattern, item);
2162 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2163 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2164 rte_flow_error_set(error, EINVAL,
2165 RTE_FLOW_ERROR_TYPE_ITEM,
2166 item, "Not supported by fdir filter");
2171 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
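/*
 * Editor's note - an illustrative sketch, not part of the original driver:
 * one way an application could phrase the two flex bytes accepted by the
 * parser above through the generic rte_flow API. It assumes the
 * rte_flow_item_raw layout of this DPDK generation, where "pattern" is a
 * flexible array member (storage is reserved by hand via a union, a common
 * compiler extension), and it assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT. The port id, queue index,
 * offset and pattern bytes are invented.
 */
#if 0
static int
example_flex_byte_rule(uint16_t port_id, struct rte_flow_error *err)
{
	/* Reserve room for two pattern bytes behind each raw item. */
	union raw_item {
		struct rte_flow_item_raw item;
		uint8_t buf[sizeof(struct rte_flow_item_raw) + 2];
	};
	union raw_item raw_spec = { .item = {
		.relative = 0,
		.search = 0,
		.offset = 12,	/* must be even and <= IXGBE_MAX_FLX_SOURCE_OFF */
		.limit = 0,
		.length = 2,	/* exactly two flex bytes */
	} };
	union raw_item raw_mask = { .item = {
		.relative = 1,	/* every raw field must be fully masked */
		.search = 1,
		.offset = -1,	/* reads back as 0xffffffff */
		.limit = 0xffff,
		.length = 0xffff,
	} };
	/* All-zero IPv4 mask: addresses wildcarded, header fields zero. */
	struct rte_flow_item_ipv4 ipv4_mask = { .hdr = { .src_addr = 0 } };
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
		  .spec = &raw_spec.item, .mask = &raw_mask.item },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	raw_spec.item.pattern[0] = 0xab;	/* must not be 0xff 0xff */
	raw_spec.item.pattern[1] = 0xcd;
	raw_mask.item.pattern[0] = 0xff;
	raw_mask.item.pattern[1] = 0xff;

	return rte_flow_create(port_id, &attr, pattern, actions, err) != NULL
		? 0 : -rte_errno;
}
#endif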
2174 #define NVGRE_PROTOCOL 0x6558
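/* Editor's note: 0x6558 is the EtherType for Transparent Ethernet
 * Bridging, which NVGRE carries in the GRE protocol field.
 */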
2175 /**
2176 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2177 * and fill in the flow director filter info along the way.
2178 * VxLAN PATTERN:
2179 * The first not void item must be ETH.
2180 * The second not void item must be IPV4/ IPV6.
2181 * The third not void item must be UDP.
2182 * The fourth not void item must be VXLAN.
2183 * The next not void item must be END.
2184 * NVGRE PATTERN:
2185 * The first not void item must be ETH.
2186 * The second not void item must be IPV4/ IPV6.
2187 * The third not void item must be NVGRE.
2188 * The next not void item must be END.
2189 * ACTION:
2190 * The first not void action should be QUEUE or DROP.
2191 * The second not void optional action should be MARK,
2192 * mark_id is a uint32_t number.
2193 * The next not void action should be END.
2194 * VxLAN pattern example:
2195 * ITEM Spec Mask
2196 * ETH NULL NULL
2197 * IPV4/IPV6 NULL NULL
2198 * UDP NULL NULL
2199 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2200 * MAC VLAN tci 0x2016 0xEFFF
2201 * END
2202 * NVGRE pattern example:
2203 * ITEM Spec Mask
2204 * ETH NULL NULL
2205 * IPV4/IPV6 NULL NULL
2206 * NVGRE protocol 0x6558 0xFFFF
2207 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2208 * MAC VLAN tci 0x2016 0xEFFF
2209 * END
2210 * Other members in mask and spec should be set to 0x00.
2211 * item->last should be NULL.
2212 */
2213 static int
2214 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2215 const struct rte_flow_item pattern[],
2216 const struct rte_flow_action actions[],
2217 struct ixgbe_fdir_rule *rule,
2218 struct rte_flow_error *error)
2219 {
2220 const struct rte_flow_item *item;
2221 const struct rte_flow_item_vxlan *vxlan_spec;
2222 const struct rte_flow_item_vxlan *vxlan_mask;
2223 const struct rte_flow_item_nvgre *nvgre_spec;
2224 const struct rte_flow_item_nvgre *nvgre_mask;
2225 const struct rte_flow_item_eth *eth_spec;
2226 const struct rte_flow_item_eth *eth_mask;
2227 const struct rte_flow_item_vlan *vlan_spec;
2228 const struct rte_flow_item_vlan *vlan_mask;
2229 uint32_t j;
2231 if (!pattern) {
2232 rte_flow_error_set(error, EINVAL,
2233 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2234 NULL, "NULL pattern.");
2235 return -rte_errno;
2236 }
2238 if (!actions) {
2239 rte_flow_error_set(error, EINVAL,
2240 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2241 NULL, "NULL action.");
2242 return -rte_errno;
2243 }
2245 if (!attr) {
2246 rte_flow_error_set(error, EINVAL,
2247 RTE_FLOW_ERROR_TYPE_ATTR,
2248 NULL, "NULL attribute.");
2249 return -rte_errno;
2250 }
2252 /**
2253 * Some fields may not be provided. Set spec to 0 and mask to the
2254 * default value, so we need not touch the unprovided fields later.
2255 */
2256 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2257 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2258 rule->mask.vlan_tci_mask = 0;
2260 /**
2261 * The first not void item should be
2262 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2263 */
2264 item = next_no_void_pattern(pattern, NULL);
2265 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2266 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2267 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2268 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2269 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2270 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2271 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2272 rte_flow_error_set(error, EINVAL,
2273 RTE_FLOW_ERROR_TYPE_ITEM,
2274 item, "Not supported by fdir filter");
2275 return -rte_errno;
2276 }
2278 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
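/* Editor's note: tunnel rules use the perfect-tunnel FDIR mode; the rule
 * is later rejected by ixgbe_parse_fdir_filter() if this mode does not
 * match the port's configured dev_conf.fdir_conf.mode.
 */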
2281 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2282 /* Only used to describe the protocol stack. */
2283 if (item->spec || item->mask) {
2284 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2285 rte_flow_error_set(error, EINVAL,
2286 RTE_FLOW_ERROR_TYPE_ITEM,
2287 item, "Not supported by fdir filter");
2288 return -rte_errno;
2289 }
2290 /* Not supported last point for range */
2291 if (item->last) {
2292 rte_flow_error_set(error, EINVAL,
2293 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2294 item, "Not supported last point for range");
2295 return -rte_errno;
2296 }
2298 /* Check if the next not void item is IPv4 or IPv6. */
2299 item = next_no_void_pattern(pattern, item);
2300 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2301 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2302 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2303 rte_flow_error_set(error, EINVAL,
2304 RTE_FLOW_ERROR_TYPE_ITEM,
2305 item, "Not supported by fdir filter");
2306 return -rte_errno;
2307 }
2308 }
2311 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2312 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2313 /* Only used to describe the protocol stack. */
2314 if (item->spec || item->mask) {
2315 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2316 rte_flow_error_set(error, EINVAL,
2317 RTE_FLOW_ERROR_TYPE_ITEM,
2318 item, "Not supported by fdir filter");
2319 return -rte_errno;
2320 }
2321 /* Not supported last point for range */
2322 if (item->last) {
2323 rte_flow_error_set(error, EINVAL,
2324 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2325 item, "Not supported last point for range");
2326 return -rte_errno;
2327 }
2329 /* Check if the next not void item is UDP or NVGRE. */
2330 item = next_no_void_pattern(pattern, item);
2331 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2332 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2333 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2334 rte_flow_error_set(error, EINVAL,
2335 RTE_FLOW_ERROR_TYPE_ITEM,
2336 item, "Not supported by fdir filter");
2337 return -rte_errno;
2338 }
2339 }
2342 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2343 /* Only used to describe the protocol stack. */
2344 if (item->spec || item->mask) {
2345 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2346 rte_flow_error_set(error, EINVAL,
2347 RTE_FLOW_ERROR_TYPE_ITEM,
2348 item, "Not supported by fdir filter");
2349 return -rte_errno;
2350 }
2351 /* Not supported last point for range */
2352 if (item->last) {
2353 rte_flow_error_set(error, EINVAL,
2354 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2355 item, "Not supported last point for range");
2356 return -rte_errno;
2357 }
2359 /* Check if the next not void item is VxLAN. */
2360 item = next_no_void_pattern(pattern, item);
2361 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2362 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2363 rte_flow_error_set(error, EINVAL,
2364 RTE_FLOW_ERROR_TYPE_ITEM,
2365 item, "Not supported by fdir filter");
2366 return -rte_errno;
2367 }
2368 }
2370 /* Get the VxLAN info */
2371 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2372 rule->ixgbe_fdir.formatted.tunnel_type =
2373 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2375 /* Only care about VNI, others should be masked. */
2376 if (!item->mask) {
2377 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2378 rte_flow_error_set(error, EINVAL,
2379 RTE_FLOW_ERROR_TYPE_ITEM,
2380 item, "Not supported by fdir filter");
2381 return -rte_errno;
2382 }
2383 /* Not supported last point for range */
2384 if (item->last) {
2385 rte_flow_error_set(error, EINVAL,
2386 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2387 item, "Not supported last point for range");
2388 return -rte_errno;
2389 }
2390 rule->b_mask = TRUE;
2392 /* Tunnel type is always meaningful. */
2393 rule->mask.tunnel_type_mask = 1;
2395 vxlan_mask =
2396 (const struct rte_flow_item_vxlan *)item->mask;
2397 if (vxlan_mask->flags) {
2398 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2399 rte_flow_error_set(error, EINVAL,
2400 RTE_FLOW_ERROR_TYPE_ITEM,
2401 item, "Not supported by fdir filter");
2402 return -rte_errno;
2403 }
2404 /* VNI must be totally masked or not. */
2405 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2406 vxlan_mask->vni[2]) &&
2407 ((vxlan_mask->vni[0] != 0xFF) ||
2408 (vxlan_mask->vni[1] != 0xFF) ||
2409 (vxlan_mask->vni[2] != 0xFF))) {
2410 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2411 rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_ITEM,
2413 item, "Not supported by fdir filter");
2414 return -rte_errno;
2415 }
2417 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2418 RTE_DIM(vxlan_mask->vni));
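/* Editor's note: the 24-bit VNI arrives in big-endian wire order; below
 * it is copied into bytes 1-3 of the 32-bit tni_vni field and then
 * byte-swapped into CPU order.
 */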
2420 if (item->spec) {
2421 rule->b_spec = TRUE;
2422 vxlan_spec = (const struct rte_flow_item_vxlan *)
2423 item->spec;
2424 rte_memcpy(((uint8_t *)
2425 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2426 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2427 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2428 rule->ixgbe_fdir.formatted.tni_vni);
2429 }
2430 }
2432 /* Get the NVGRE info */
2433 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2434 rule->ixgbe_fdir.formatted.tunnel_type =
2435 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2437 /**
2438 * Only care about flags0, flags1, protocol and TNI,
2439 * others should be masked.
2440 */
2441 if (!item->mask) {
2442 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443 rte_flow_error_set(error, EINVAL,
2444 RTE_FLOW_ERROR_TYPE_ITEM,
2445 item, "Not supported by fdir filter");
2446 return -rte_errno;
2447 }
2448 /* Not supported last point for range */
2449 if (item->last) {
2450 rte_flow_error_set(error, EINVAL,
2451 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452 item, "Not supported last point for range");
2453 return -rte_errno;
2454 }
2455 rule->b_mask = TRUE;
2457 /* Tunnel type is always meaningful. */
2458 rule->mask.tunnel_type_mask = 1;
2460 nvgre_mask =
2461 (const struct rte_flow_item_nvgre *)item->mask;
2462 if (nvgre_mask->flow_id) {
2463 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2464 rte_flow_error_set(error, EINVAL,
2465 RTE_FLOW_ERROR_TYPE_ITEM,
2466 item, "Not supported by fdir filter");
2467 return -rte_errno;
2468 }
2469 if (nvgre_mask->c_k_s_rsvd0_ver !=
2470 rte_cpu_to_be_16(0x3000) ||
2471 nvgre_mask->protocol != 0xFFFF) {
2472 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2473 rte_flow_error_set(error, EINVAL,
2474 RTE_FLOW_ERROR_TYPE_ITEM,
2475 item, "Not supported by fdir filter");
2476 return -rte_errno;
2477 }
2478 /* TNI must be totally masked or not. */
2479 if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] || nvgre_mask->tni[2]) &&
2480 ((nvgre_mask->tni[0] != 0xFF) ||
2481 (nvgre_mask->tni[1] != 0xFF) ||
2482 (nvgre_mask->tni[2] != 0xFF))) {
2483 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2484 rte_flow_error_set(error, EINVAL,
2485 RTE_FLOW_ERROR_TYPE_ITEM,
2486 item, "Not supported by fdir filter");
2487 return -rte_errno;
2488 }
2489 /* TNI is a 24-bit field. */
2490 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2491 RTE_DIM(nvgre_mask->tni));
2492 rule->mask.tunnel_id_mask <<= 8;
2494 if (item->spec) {
2495 rule->b_spec = TRUE;
2496 nvgre_spec =
2497 (const struct rte_flow_item_nvgre *)item->spec;
2498 if (nvgre_spec->c_k_s_rsvd0_ver !=
2499 rte_cpu_to_be_16(0x2000) ||
2500 nvgre_spec->protocol !=
2501 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2502 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2503 rte_flow_error_set(error, EINVAL,
2504 RTE_FLOW_ERROR_TYPE_ITEM,
2505 item, "Not supported by fdir filter");
2506 return -rte_errno;
2507 }
2508 /* TNI is a 24-bit field. */
2509 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2510 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2511 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
2512 }
2513 }
2515 /* check if the next not void item is MAC */
2516 item = next_no_void_pattern(pattern, item);
2517 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2518 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2519 rte_flow_error_set(error, EINVAL,
2520 RTE_FLOW_ERROR_TYPE_ITEM,
2521 item, "Not supported by fdir filter");
2522 return -rte_errno;
2523 }
2525 /**
2526 * Only support VLAN and dst MAC address,
2527 * others should be masked.
2528 */
2530 if (!item->mask) {
2531 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2532 rte_flow_error_set(error, EINVAL,
2533 RTE_FLOW_ERROR_TYPE_ITEM,
2534 item, "Not supported by fdir filter");
2535 return -rte_errno;
2536 }
2537 /* Not supported last point for range */
2538 if (item->last) {
2539 rte_flow_error_set(error, EINVAL,
2540 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2541 item, "Not supported last point for range");
2542 return -rte_errno;
2543 }
2544 rule->b_mask = TRUE;
2545 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2547 /* Ether type should be masked. */
2548 if (eth_mask->type) {
2549 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2550 rte_flow_error_set(error, EINVAL,
2551 RTE_FLOW_ERROR_TYPE_ITEM,
2552 item, "Not supported by fdir filter");
2553 return -rte_errno;
2554 }
2556 /* src MAC address should be masked. */
2557 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2558 if (eth_mask->src.addr_bytes[j]) {
2559 memset(rule, 0,
2560 sizeof(struct ixgbe_fdir_rule));
2561 rte_flow_error_set(error, EINVAL,
2562 RTE_FLOW_ERROR_TYPE_ITEM,
2563 item, "Not supported by fdir filter");
2564 return -rte_errno;
2565 }
2566 }
2567 rule->mask.mac_addr_byte_mask = 0;
2568 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2569 /* It's a per byte mask. */
2570 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2571 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2572 } else if (eth_mask->dst.addr_bytes[j]) {
2573 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2574 rte_flow_error_set(error, EINVAL,
2575 RTE_FLOW_ERROR_TYPE_ITEM,
2576 item, "Not supported by fdir filter");
2577 return -rte_errno;
2578 }
2579 }
2581 /* When there is no VLAN, treat it as a full mask. */
2582 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2584 if (item->spec) {
2585 rule->b_spec = TRUE;
2586 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2588 /* Get the dst MAC. */
2589 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2590 rule->ixgbe_fdir.formatted.inner_mac[j] =
2591 eth_spec->dst.addr_bytes[j];
2592 }
2593 }
2595 /**
2596 * Check if the next not void item is VLAN or IPv4.
2597 * IPv6 is not supported.
2598 */
2599 item = next_no_void_pattern(pattern, item);
2600 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2601 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2602 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2603 rte_flow_error_set(error, EINVAL,
2604 RTE_FLOW_ERROR_TYPE_ITEM,
2605 item, "Not supported by fdir filter");
2606 return -rte_errno;
2607 }
2608 /* Not supported last point for range */
2609 if (item->last) {
2610 rte_flow_error_set(error, EINVAL,
2611 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2612 item, "Not supported last point for range");
2613 return -rte_errno;
2614 }
2616 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2617 if (!(item->spec && item->mask)) {
2618 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2619 rte_flow_error_set(error, EINVAL,
2620 RTE_FLOW_ERROR_TYPE_ITEM,
2621 item, "Not supported by fdir filter");
2622 return -rte_errno;
2623 }
2625 vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
2626 vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
2628 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2630 rule->mask.vlan_tci_mask = vlan_mask->tci;
2631 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2632 /* More than one VLAN tag is not supported. */
2634 /* check if the next not void item is END */
2635 item = next_no_void_pattern(pattern, item);
2637 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2638 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2639 rte_flow_error_set(error, EINVAL,
2640 RTE_FLOW_ERROR_TYPE_ITEM,
2641 item, "Not supported by fdir filter");
2642 return -rte_errno;
2643 }
2644 }
2646 /**
2647 * If the VLAN TCI is 0, the VLAN is treated as a don't-care;
2648 * do nothing.
2649 */
2651 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2652 }
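/*
 * Editor's note - an illustrative sketch, not part of the original driver:
 * an application-side VxLAN tunnel rule shaped like the pattern documented
 * above (ETH / IPV4 / UDP / VXLAN / inner ETH / VLAN / END). Port id,
 * queue index, VNI, MAC and TCI values are invented. For the NVGRE
 * variant, the spec would instead carry c_k_s_rsvd0_ver = 0x2000 (the GRE
 * key bit set) masked with 0x3000, and protocol = NVGRE_PROTOCOL (0x6558)
 * masked with 0xFFFF.
 */
#if 0
static int
example_vxlan_tunnel_rule(uint16_t port_id, struct rte_flow_error *err)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x32, 0x54 },
	};
	struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },	/* VNI fully masked */
	};
	struct rte_flow_item_eth eth_spec = {	/* inner dst MAC */
		.dst = { .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
	};
	struct rte_flow_item_eth eth_mask = {	/* per-byte dst MAC mask */
		.dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
	};
	struct rte_flow_item_vlan vlan_spec;
	struct rte_flow_item_vlan vlan_mask;
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
		  .spec = &vxlan_spec, .mask = &vxlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
		  .spec = &vlan_spec, .mask = &vlan_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	memset(&vlan_spec, 0, sizeof(vlan_spec));
	memset(&vlan_mask, 0, sizeof(vlan_mask));
	vlan_spec.tci = rte_cpu_to_be_16(0x2016);
	vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);	/* TCI without CFI bit */

	return rte_flow_create(port_id, &attr, pattern, actions, err) != NULL
		? 0 : -rte_errno;
}
#endif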
2654 static int
2655 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2656 const struct rte_flow_attr *attr,
2657 const struct rte_flow_item pattern[],
2658 const struct rte_flow_action actions[],
2659 struct ixgbe_fdir_rule *rule,
2660 struct rte_flow_error *error)
2661 {
2662 int ret;
2663 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2664 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2666 if (hw->mac.type != ixgbe_mac_82599EB &&
2667 hw->mac.type != ixgbe_mac_X540 &&
2668 hw->mac.type != ixgbe_mac_X550 &&
2669 hw->mac.type != ixgbe_mac_X550EM_x &&
2670 hw->mac.type != ixgbe_mac_X550EM_a)
2671 return -ENOTSUP;
2673 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2674 actions, rule, error);
2675 if (!ret)
2676 goto step_next;
2679 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2680 actions, rule, error);
2681 if (ret)
2682 return ret;
2684 step_next:
2687 if (hw->mac.type == ixgbe_mac_82599EB &&
2688 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2689 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2690 rule->ixgbe_fdir.formatted.dst_port != 0))
2691 return -ENOTSUP;
2693 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2694 fdir_mode != rule->mode)
2695 return -ENOTSUP;
2697 if (rule->queue >= dev->data->nb_rx_queues)
2698 return -ENOTSUP;
2700 return ret;
2701 }
2703 void
2704 ixgbe_filterlist_init(void)
2705 {
2706 TAILQ_INIT(&filter_ntuple_list);
2707 TAILQ_INIT(&filter_ethertype_list);
2708 TAILQ_INIT(&filter_syn_list);
2709 TAILQ_INIT(&filter_fdir_list);
2710 TAILQ_INIT(&filter_l2_tunnel_list);
2711 TAILQ_INIT(&ixgbe_flow_list);
2712 }
2714 void
2715 ixgbe_filterlist_flush(void)
2716 {
2717 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2718 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2719 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2720 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2721 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2722 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2724 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2725 TAILQ_REMOVE(&filter_ntuple_list,
2726 ntuple_filter_ptr,
2727 entries);
2728 rte_free(ntuple_filter_ptr);
2729 }
2731 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2732 TAILQ_REMOVE(&filter_ethertype_list,
2733 ethertype_filter_ptr,
2734 entries);
2735 rte_free(ethertype_filter_ptr);
2736 }
2738 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2739 TAILQ_REMOVE(&filter_syn_list,
2740 syn_filter_ptr,
2741 entries);
2742 rte_free(syn_filter_ptr);
2743 }
2745 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2746 TAILQ_REMOVE(&filter_l2_tunnel_list,
2747 l2_tn_filter_ptr,
2748 entries);
2749 rte_free(l2_tn_filter_ptr);
2750 }
2752 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2753 TAILQ_REMOVE(&filter_fdir_list,
2754 fdir_rule_ptr,
2755 entries);
2756 rte_free(fdir_rule_ptr);
2757 }
2759 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2760 TAILQ_REMOVE(&ixgbe_flow_list,
2761 ixgbe_flow_mem_ptr,
2762 entries);
2763 rte_free(ixgbe_flow_mem_ptr->flow);
2764 rte_free(ixgbe_flow_mem_ptr);
2765 }
2766 }
2768 /**
2769 * Create or destroy a flow rule.
2770 * Theoretically one rule can match more than one filter type.
2771 * We will let it use the filter type it hits first,
2772 * so the ordering of the parsers below matters.
2773 */
2774 static struct rte_flow *
2775 ixgbe_flow_create(struct rte_eth_dev *dev,
2776 const struct rte_flow_attr *attr,
2777 const struct rte_flow_item pattern[],
2778 const struct rte_flow_action actions[],
2779 struct rte_flow_error *error)
2780 {
2781 int ret;
2782 struct rte_eth_ntuple_filter ntuple_filter;
2783 struct rte_eth_ethertype_filter ethertype_filter;
2784 struct rte_eth_syn_filter syn_filter;
2785 struct ixgbe_fdir_rule fdir_rule;
2786 struct rte_eth_l2_tunnel_conf l2_tn_filter;
2787 struct ixgbe_hw_fdir_info *fdir_info =
2788 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
2789 struct rte_flow *flow = NULL;
2790 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2791 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2792 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2793 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2794 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2795 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2796 uint8_t first_mask = FALSE;
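/* Editor's note: first_mask records whether this very call programmed the
 * global FDIR mask, so it can be rolled back (mask_added cleared) if the
 * rule itself then fails to program.
 */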
2798 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
2799 if (!flow) {
2800 PMD_DRV_LOG(ERR, "failed to allocate memory");
2801 return (struct rte_flow *)flow;
2802 }
2803 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
2804 sizeof(struct ixgbe_flow_mem), 0);
2805 if (!ixgbe_flow_mem_ptr) {
2806 PMD_DRV_LOG(ERR, "failed to allocate memory");
2807 rte_free(flow);
2808 return NULL;
2809 }
2810 ixgbe_flow_mem_ptr->flow = flow;
2811 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
2812 ixgbe_flow_mem_ptr, entries);
2814 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
2815 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
2816 actions, &ntuple_filter, error);
2818 #ifdef RTE_LIBRTE_SECURITY
2819 /* An ESP flow is not really a flow. */
2820 if (ntuple_filter.proto == IPPROTO_ESP)
2821 return flow;
2822 #endif
2824 if (!ret) {
2825 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
2826 if (!ret) {
2827 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
2828 sizeof(struct ixgbe_ntuple_filter_ele), 0);
2829 if (!ntuple_filter_ptr) {
2830 PMD_DRV_LOG(ERR, "failed to allocate memory");
2831 goto out;
2832 }
2833 rte_memcpy(&ntuple_filter_ptr->filter_info,
2834 &ntuple_filter,
2835 sizeof(struct rte_eth_ntuple_filter));
2836 TAILQ_INSERT_TAIL(&filter_ntuple_list,
2837 ntuple_filter_ptr, entries);
2838 flow->rule = ntuple_filter_ptr;
2839 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
2840 return flow;
2841 }
2842 goto out;
2843 }
2845 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2846 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2847 actions, &ethertype_filter, error);
2848 if (!ret) {
2849 ret = ixgbe_add_del_ethertype_filter(dev,
2850 &ethertype_filter, TRUE);
2851 if (!ret) {
2852 ethertype_filter_ptr = rte_zmalloc(
2853 "ixgbe_ethertype_filter",
2854 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2855 if (!ethertype_filter_ptr) {
2856 PMD_DRV_LOG(ERR, "failed to allocate memory");
2857 goto out;
2858 }
2859 rte_memcpy(&ethertype_filter_ptr->filter_info,
2860 &ethertype_filter,
2861 sizeof(struct rte_eth_ethertype_filter));
2862 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2863 ethertype_filter_ptr, entries);
2864 flow->rule = ethertype_filter_ptr;
2865 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
2866 return flow;
2867 }
2868 goto out;
2869 }
2871 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
2872 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
2873 actions, &syn_filter, error);
2874 if (!ret) {
2875 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
2876 if (!ret) {
2877 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
2878 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
2879 if (!syn_filter_ptr) {
2880 PMD_DRV_LOG(ERR, "failed to allocate memory");
2881 goto out;
2882 }
2883 rte_memcpy(&syn_filter_ptr->filter_info,
2884 &syn_filter,
2885 sizeof(struct rte_eth_syn_filter));
2886 TAILQ_INSERT_TAIL(&filter_syn_list,
2887 syn_filter_ptr,
2888 entries);
2889 flow->rule = syn_filter_ptr;
2890 flow->filter_type = RTE_ETH_FILTER_SYN;
2891 return flow;
2892 }
2893 goto out;
2894 }
2896 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
2897 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
2898 actions, &fdir_rule, error);
2899 if (!ret) {
2900 /* A mask cannot be deleted: the global FDIR mask stays once set. */
2901 if (fdir_rule.b_mask) {
2902 if (!fdir_info->mask_added) {
2903 /* It's the first time the mask is set. */
2904 rte_memcpy(&fdir_info->mask,
2905 &fdir_rule.mask,
2906 sizeof(struct ixgbe_hw_fdir_mask));
2907 fdir_info->flex_bytes_offset =
2908 fdir_rule.flex_bytes_offset;
2910 if (fdir_rule.mask.flex_bytes_mask)
2911 ixgbe_fdir_set_flexbytes_offset(dev,
2912 fdir_rule.flex_bytes_offset);
2914 ret = ixgbe_fdir_set_input_mask(dev);
2915 if (ret)
2916 goto out;
2918 fdir_info->mask_added = TRUE;
2919 first_mask = TRUE;
2920 } else {
2921 /**
2922 * Only support one global mask,
2923 * all the masks should be the same.
2924 */
2925 ret = memcmp(&fdir_info->mask,
2926 &fdir_rule.mask,
2927 sizeof(struct ixgbe_hw_fdir_mask));
2928 if (ret)
2929 goto out;
2931 if (fdir_info->flex_bytes_offset !=
2932 fdir_rule.flex_bytes_offset)
2933 goto out;
2934 }
2935 }
2937 if (fdir_rule.b_spec) {
2938 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
2939 FALSE, FALSE);
2940 if (!ret) {
2941 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
2942 sizeof(struct ixgbe_fdir_rule_ele), 0);
2943 if (!fdir_rule_ptr) {
2944 PMD_DRV_LOG(ERR, "failed to allocate memory");
2945 goto out;
2946 }
2947 rte_memcpy(&fdir_rule_ptr->filter_info,
2948 &fdir_rule,
2949 sizeof(struct ixgbe_fdir_rule));
2950 TAILQ_INSERT_TAIL(&filter_fdir_list,
2951 fdir_rule_ptr, entries);
2952 flow->rule = fdir_rule_ptr;
2953 flow->filter_type = RTE_ETH_FILTER_FDIR;
2955 return flow;
2956 }
2958 if (ret) {
2959 /**
2960 * Clean the mask_added flag if we fail to
2961 * program the rule.
2962 */
2963 if (first_mask)
2964 fdir_info->mask_added = FALSE;
2965 goto out;
2966 }
2967 }
2969 goto out;
2970 }
2972 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
2973 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
2974 actions, &l2_tn_filter, error);
2975 if (!ret) {
2976 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
2977 if (!ret) {
2978 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
2979 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
2980 if (!l2_tn_filter_ptr) {
2981 PMD_DRV_LOG(ERR, "failed to allocate memory");
2982 goto out;
2983 }
2984 rte_memcpy(&l2_tn_filter_ptr->filter_info,
2985 &l2_tn_filter,
2986 sizeof(struct rte_eth_l2_tunnel_conf));
2987 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
2988 l2_tn_filter_ptr, entries);
2989 flow->rule = l2_tn_filter_ptr;
2990 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
2991 return flow;
2992 }
2993 }
2995 out:
2996 TAILQ_REMOVE(&ixgbe_flow_list,
2997 ixgbe_flow_mem_ptr, entries);
2998 rte_flow_error_set(error, -ret,
2999 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3000 "Failed to create flow.");
3001 rte_free(ixgbe_flow_mem_ptr);
3002 rte_free(flow);
3003 return NULL;
3004 }
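/*
 * Editor's note - illustrative only: the application-side sequence that
 * reaches ixgbe_flow_validate()/ixgbe_flow_create() through the generic
 * rte_flow API. "attr", "pattern" and "actions" stand for any rule the
 * caller built, e.g. the sketches earlier in this file.
 */
#if 0
static struct rte_flow *
example_create_checked(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[])
{
	struct rte_flow_error err;

	/* Validation runs the same parsers but touches no hardware. */
	if (rte_flow_validate(port_id, attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, attr, pattern, actions, &err);
}
#endif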
3006 /**
3007 * Check whether the flow rule is supported by ixgbe.
3008 * It only checks the format. There is no guarantee that the rule can be
3009 * programmed into the HW, since there may not be enough room for it.
3010 */
3011 static int
3012 ixgbe_flow_validate(struct rte_eth_dev *dev,
3013 const struct rte_flow_attr *attr,
3014 const struct rte_flow_item pattern[],
3015 const struct rte_flow_action actions[],
3016 struct rte_flow_error *error)
3017 {
3018 struct rte_eth_ntuple_filter ntuple_filter;
3019 struct rte_eth_ethertype_filter ethertype_filter;
3020 struct rte_eth_syn_filter syn_filter;
3021 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3022 struct ixgbe_fdir_rule fdir_rule;
3023 int ret;
3025 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3026 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3027 actions, &ntuple_filter, error);
3028 if (!ret)
3029 return 0;
3031 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3032 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3033 actions, &ethertype_filter, error);
3034 if (!ret)
3035 return 0;
3037 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3038 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3039 actions, &syn_filter, error);
3040 if (!ret)
3041 return 0;
3043 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3044 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3045 actions, &fdir_rule, error);
3046 if (!ret)
3047 return 0;
3049 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3050 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3051 actions, &l2_tn_filter, error);
3053 return ret;
3054 }
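/* Editor's note: validation walks the same parser chain, in the same
 * order, as ixgbe_flow_create(), but stops after parsing: nothing is
 * written to hardware and no filter-list entry is allocated.
 */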
3056 /* Destroy a flow rule on ixgbe. */
3057 static int
3058 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3059 struct rte_flow *flow,
3060 struct rte_flow_error *error)
3061 {
3062 int ret;
3063 struct rte_flow *pmd_flow = flow;
3064 enum rte_filter_type filter_type = pmd_flow->filter_type;
3065 struct rte_eth_ntuple_filter ntuple_filter;
3066 struct rte_eth_ethertype_filter ethertype_filter;
3067 struct rte_eth_syn_filter syn_filter;
3068 struct ixgbe_fdir_rule fdir_rule;
3069 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3070 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3071 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3072 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3073 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3074 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3075 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3076 struct ixgbe_hw_fdir_info *fdir_info =
3077 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3079 switch (filter_type) {
3080 case RTE_ETH_FILTER_NTUPLE:
3081 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3082 pmd_flow->rule;
3083 rte_memcpy(&ntuple_filter,
3084 &ntuple_filter_ptr->filter_info,
3085 sizeof(struct rte_eth_ntuple_filter));
3086 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3087 if (!ret) {
3088 TAILQ_REMOVE(&filter_ntuple_list,
3089 ntuple_filter_ptr, entries);
3090 rte_free(ntuple_filter_ptr);
3091 }
3092 break;
3093 case RTE_ETH_FILTER_ETHERTYPE:
3094 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3095 pmd_flow->rule;
3096 rte_memcpy(&ethertype_filter,
3097 &ethertype_filter_ptr->filter_info,
3098 sizeof(struct rte_eth_ethertype_filter));
3099 ret = ixgbe_add_del_ethertype_filter(dev,
3100 &ethertype_filter, FALSE);
3101 if (!ret) {
3102 TAILQ_REMOVE(&filter_ethertype_list,
3103 ethertype_filter_ptr, entries);
3104 rte_free(ethertype_filter_ptr);
3105 }
3106 break;
3107 case RTE_ETH_FILTER_SYN:
3108 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3109 pmd_flow->rule;
3110 rte_memcpy(&syn_filter,
3111 &syn_filter_ptr->filter_info,
3112 sizeof(struct rte_eth_syn_filter));
3113 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3114 if (!ret) {
3115 TAILQ_REMOVE(&filter_syn_list,
3116 syn_filter_ptr, entries);
3117 rte_free(syn_filter_ptr);
3118 }
3119 break;
3120 case RTE_ETH_FILTER_FDIR:
3121 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3122 rte_memcpy(&fdir_rule,
3123 &fdir_rule_ptr->filter_info,
3124 sizeof(struct ixgbe_fdir_rule));
3125 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3126 if (!ret) {
3127 TAILQ_REMOVE(&filter_fdir_list,
3128 fdir_rule_ptr, entries);
3129 rte_free(fdir_rule_ptr);
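/* Editor's note: once the last FDIR rule is gone, clearing mask_added
 * below lets the next created rule program a fresh global mask.
 */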
3130 if (TAILQ_EMPTY(&filter_fdir_list))
3131 fdir_info->mask_added = false;
3132 }
3133 break;
3134 case RTE_ETH_FILTER_L2_TUNNEL:
3135 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3136 pmd_flow->rule;
3137 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3138 sizeof(struct rte_eth_l2_tunnel_conf));
3139 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3140 if (!ret) {
3141 TAILQ_REMOVE(&filter_l2_tunnel_list,
3142 l2_tn_filter_ptr, entries);
3143 rte_free(l2_tn_filter_ptr);
3144 }
3145 break;
3146 default:
3147 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3148 filter_type);
3149 ret = -EINVAL;
3150 break;
3151 }
3153 if (ret) {
3154 rte_flow_error_set(error, EINVAL,
3155 RTE_FLOW_ERROR_TYPE_HANDLE,
3156 NULL, "Failed to destroy flow");
3157 return ret;
3158 }
3160 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3161 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3162 TAILQ_REMOVE(&ixgbe_flow_list,
3163 ixgbe_flow_mem_ptr, entries);
3164 rte_free(ixgbe_flow_mem_ptr);
3165 break;
3166 }
3167 }
3168 rte_free(flow);
3170 return ret;
3171 }
3172 /* Destroy all flow rules associated with a port on ixgbe. */
3173 static int
3174 ixgbe_flow_flush(struct rte_eth_dev *dev,
3175 struct rte_flow_error *error)
3176 {
3177 int ret = 0;
3179 ixgbe_clear_all_ntuple_filter(dev);
3180 ixgbe_clear_all_ethertype_filter(dev);
3181 ixgbe_clear_syn_filter(dev);
3183 ret = ixgbe_clear_all_fdir_filter(dev);
3184 if (ret < 0) {
3185 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3186 NULL, "Failed to flush rule");
3187 return ret;
3188 }
3190 ret = ixgbe_clear_all_l2_tn_filter(dev);
3191 if (ret < 0) {
3192 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3193 NULL, "Failed to flush rule");
3194 return ret;
3195 }
3197 ixgbe_filterlist_flush();
3199 return 0;
3200 }
3202 const struct rte_flow_ops ixgbe_flow_ops = {
3203 .validate = ixgbe_flow_validate,
3204 .create = ixgbe_flow_create,
3205 .destroy = ixgbe_flow_destroy,
3206 .flush = ixgbe_flow_flush,
3207 };
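/*
 * Editor's note - illustrative only: ixgbe_flow_ops is handed to
 * applications through the driver's filter_ctrl callback, roughly as
 * in the fragment below. The real hook lives in ixgbe_ethdev.c; this
 * is an assumption from memory, not a quote of that file.
 */
#if 0
	case RTE_ETH_FILTER_GENERIC:
		if (filter_op != RTE_ETH_FILTER_GET)
			return -EINVAL;
		*(const void **)arg = &ixgbe_flow_ops;
		break;
#endif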