/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO	1
#define IXGBE_MAX_N_TUPLE_PRIO	7
#define IXGBE_MAX_FLX_SOURCE_OFF	62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
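
/*
 * A minimal usage sketch of how these TAILQ heads are initialized and
 * populated (assumption: this mirrors the driver's filter-list setup code;
 * "ntuple_filter" below is a hypothetical, already-parsed filter):
 *
 *	TAILQ_INIT(&filter_ntuple_list);
 *
 *	struct ixgbe_ntuple_filter_ele *node =
 *		rte_zmalloc("ixgbe_flow", sizeof(*node), 0);
 *	if (node) {
 *		rte_memcpy(&node->filter_info, &ntuple_filter,
 *			   sizeof(struct rte_eth_ntuple_filter));
 *		TAILQ_INSERT_TAIL(&filter_ntuple_list, node, entries);
 *	}
 */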
/**
 * An endless loop cannot occur under the two assumptions below:
 * 1. there is at least one no-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network (big endian) order.
 */
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
#ifdef RTE_LIBRTE_SECURITY
	/**
	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern*/
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask  = tcp_mask->hdr.dst_port;
		filter->src_port_mask  = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port  = tcp_spec->hdr.dst_port;
		filter->src_port  = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
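
/*
 * Usage sketch (not part of the driver): a flow rule that this parser
 * accepts, built with the public rte_flow API. The port id, queue index
 * and addresses are hypothetical values chosen to match the pattern
 * example documented above.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr = {
 *			.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *			.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *			.next_proto_id = IPPROTO_UDP,
 *		},
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr = {
 *			.src_addr = UINT32_MAX,
 *			.dst_addr = UINT32_MAX,
 *			.next_proto_id = UINT8_MAX,
 *		},
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = {
 *			.src_port = rte_cpu_to_be_16(80),
 *			.dst_port = rte_cpu_to_be_16(80),
 *		},
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = {
 *			.src_port = UINT16_MAX,
 *			.dst_port = UINT16_MAX,
 *		},
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */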
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* ESP flow not really a flow*/
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}
	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
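
/*
 * Usage sketch (not part of the driver): an ethertype rule this parser
 * accepts. The 0x0807 ether type and the queue index are hypothetical.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */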
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
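
/*
 * Usage sketch (not part of the driver): a TCP SYN rule this parser
 * accepts. Only the SYN flag may be matched; the queue index is
 * hypothetical, and priority 0 selects the low priority.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 0 };
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .tcp_flags = TCP_SYN_FLAG },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */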
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
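
/*
 * Usage sketch (not part of the driver): an E-tag rule this parser
 * accepts. GRP (2 bits) and e_cid_base (12 bits) are packed into the
 * big-endian rsvd_grp_ecid_b field; the values and pool index below are
 * hypothetical, matching the pattern example documented above.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */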
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
		(act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
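
/*
 * Usage sketch (not part of the driver): the action list this helper
 * accepts, i.e. QUEUE or DROP optionally followed by MARK. The queue
 * index and mark id are hypothetical.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */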
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
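
/*
 * Usage sketch (not part of the driver): placing a FUZZY item anywhere in
 * the pattern before END makes signature_match() return 1 and selects
 * signature mode for the flow director rule. The threshold is hypothetical;
 * any non-zero masked value works.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */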
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}
	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
					eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must signature match
		 * 2. not support last
		 * 3. mask must not null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* only the x550 family supports the sctp port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
				sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other mac types, even the sctp port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
				(sctp_mask->hdr.src_port ||
				 sctp_mask->hdr.dst_port ||
				 sctp_mask->hdr.tag ||
				 sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
			item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
2077 /* Get the flex byte info */
2078 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2079 /* Not supported last point for range*/
2081 rte_flow_error_set(error, EINVAL,
2082 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2083 item, "Not supported last point for range");
2086 /* mask should not be null */
2087 if (!item->mask || !item->spec) {
2088 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2089 rte_flow_error_set(error, EINVAL,
2090 RTE_FLOW_ERROR_TYPE_ITEM,
2091 item, "Not supported by fdir filter");
2095 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2098 if (raw_mask->relative != 0x1 ||
2099 raw_mask->search != 0x1 ||
2100 raw_mask->reserved != 0x0 ||
2101 (uint32_t)raw_mask->offset != 0xffffffff ||
2102 raw_mask->limit != 0xffff ||
2103 raw_mask->length != 0xffff) {
2104 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2105 rte_flow_error_set(error, EINVAL,
2106 RTE_FLOW_ERROR_TYPE_ITEM,
2107 item, "Not supported by fdir filter");
raw_spec = (const struct rte_flow_item_raw *)item->spec;
if (raw_spec->relative != 0 ||
raw_spec->search != 0 ||
raw_spec->reserved != 0 ||
raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
raw_spec->offset % 2 ||
raw_spec->limit != 0 ||
raw_spec->length != 2 ||
/* pattern can't be 0xffff */
(raw_spec->pattern[0] == 0xff &&
raw_spec->pattern[1] == 0xff)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* check pattern mask */
if (raw_mask->pattern[0] != 0xff ||
raw_mask->pattern[1] != 0xff) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
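/*
 * Both flex bytes are matched exactly: pattern[0] supplies the low
 * byte and pattern[1] the high byte of the 16-bit flex word below.
 */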
rule->mask.flex_bytes_mask = 0xffff;
rule->ixgbe_fdir.formatted.flex_bytes =
(((uint16_t)raw_spec->pattern[1]) << 8) |
raw_spec->pattern[0];
rule->flex_bytes_offset = raw_spec->offset;
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* check if the next not void item is END */
item = next_no_fuzzy_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
 * and collect the flow director filter info along the way.
 * VxLAN pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTIONS:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
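/*
 * For illustration only (not part of the driver): a minimal sketch of
 * how an application might express the VxLAN pattern above through the
 * generic rte_flow API. Variable names, addresses and the queue index
 * are made up for the example; the exact inner-item requirements are
 * enforced by the parser below.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */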
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
struct rte_flow_error *error)
const struct rte_flow_item *item;
const struct rte_flow_item_vxlan *vxlan_spec;
const struct rte_flow_item_vxlan *vxlan_mask;
const struct rte_flow_item_nvgre *nvgre_spec;
const struct rte_flow_item_nvgre *nvgre_mask;
const struct rte_flow_item_eth *eth_spec;
const struct rte_flow_item_eth *eth_mask;
const struct rte_flow_item_vlan *vlan_spec;
const struct rte_flow_item_vlan *vlan_mask;
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM_NUM,
NULL, "NULL pattern.");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ACTION_NUM,
NULL, "NULL action.");
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ATTR,
NULL, "NULL attribute.");
/*
 * Some fields may not be provided. Set spec to 0 and mask to default
 * value, so we need not do anything later for the fields that are not
 * provided.
 */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
rule->mask.vlan_tci_mask = 0;
/*
 * The first not void item should be
 * MAC, IPv4, IPv6, UDP, VxLAN or NVGRE.
 */
item = next_no_void_pattern(pattern, NULL);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
/* Only used to describe the protocol stack. */
if (item->spec || item->mask) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
/* Check if the next not void item is IPv4 or IPv6. */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
/* Only used to describe the protocol stack. */
if (item->spec || item->mask) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
/* Check if the next not void item is UDP or NVGRE. */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
/* Only used to describe the protocol stack. */
if (item->spec || item->mask) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
/* Check if the next not void item is VxLAN. */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Get the VxLAN info */
if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
rule->ixgbe_fdir.formatted.tunnel_type =
RTE_FDIR_TUNNEL_TYPE_VXLAN;
/* Only care about VNI, others should be masked. */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
rule->b_mask = TRUE;
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
vxlan_mask =
(const struct rte_flow_item_vxlan *)item->mask;
if (vxlan_mask->flags) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* VNI must be totally masked or not at all. */
if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
vxlan_mask->vni[2]) &&
((vxlan_mask->vni[0] != 0xFF) ||
(vxlan_mask->vni[1] != 0xFF) ||
(vxlan_mask->vni[2] != 0xFF))) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
RTE_DIM(vxlan_mask->vni));
rule->b_spec = TRUE;
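/*
 * The VNI is a 24-bit big-endian field. It is copied in behind a
 * leading zero byte and then converted, so tni_vni ends up holding
 * the numeric VNI value.
 */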
vxlan_spec = (const struct rte_flow_item_vxlan *)
item->spec;
rte_memcpy(((uint8_t *)
&rule->ixgbe_fdir.formatted.tni_vni + 1),
vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
rule->ixgbe_fdir.formatted.tni_vni);
/* Get the NVGRE info */
if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
rule->ixgbe_fdir.formatted.tunnel_type =
RTE_FDIR_TUNNEL_TYPE_NVGRE;
/*
 * Only care about flags0, flags1, protocol and TNI;
 * others should be masked.
 */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
rule->b_mask = TRUE;
/* Tunnel type is always meaningful. */
rule->mask.tunnel_type_mask = 1;
nvgre_mask =
(const struct rte_flow_item_nvgre *)item->mask;
if (nvgre_mask->flow_id) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
if (nvgre_mask->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x3000) ||
nvgre_mask->protocol != 0xFFFF) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* TNI must be totally masked or not at all. */
if (nvgre_mask->tni[0] &&
((nvgre_mask->tni[0] != 0xFF) ||
(nvgre_mask->tni[1] != 0xFF) ||
(nvgre_mask->tni[2] != 0xFF))) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* TNI is a 24-bit field. */
rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
RTE_DIM(nvgre_mask->tni));
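/*
 * Shift the 24-bit TNI mask up so it lines up with the TNI position
 * in the 32-bit tunnel id; the NVGRE flow id byte (required to be
 * unmasked above) is never matched.
 */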
rule->mask.tunnel_id_mask <<= 8;
rule->b_spec = TRUE;
nvgre_spec =
(const struct rte_flow_item_nvgre *)item->spec;
if (nvgre_spec->c_k_s_rsvd0_ver !=
rte_cpu_to_be_16(0x2000) ||
nvgre_spec->protocol !=
rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* TNI is a 24-bit field. */
rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
rule->ixgbe_fdir.formatted.tni_vni <<= 8;
/* Check if the next not void item is MAC. */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/*
 * Only support VLAN and dst MAC address;
 * others should be masked.
 */
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
rule->b_mask = TRUE;
eth_mask = (const struct rte_flow_item_eth *)item->mask;
/* Ether type should be masked. */
if (eth_mask->type) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* src MAC address should be masked. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
if (eth_mask->src.addr_bytes[j]) {
memset(rule, 0,
sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
rule->mask.mac_addr_byte_mask = 0;
for (j = 0; j < ETHER_ADDR_LEN; j++) {
/* It's a per-byte mask. */
if (eth_mask->dst.addr_bytes[j] == 0xFF) {
rule->mask.mac_addr_byte_mask |= 0x1 << j;
} else if (eth_mask->dst.addr_bytes[j]) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
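/*
 * Note on the TCI mask used below: 0xEFFF keeps the priority (PCP)
 * and VLAN ID bits and excludes only the CFI/DEI bit.
 */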
/* When there is no VLAN item, the TCI is considered fully masked. */
rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
rule->b_spec = TRUE;
eth_spec = (const struct rte_flow_item_eth *)item->spec;
/* Get the dst MAC. */
for (j = 0; j < ETHER_ADDR_LEN; j++) {
rule->ixgbe_fdir.formatted.inner_mac[j] =
eth_spec->dst.addr_bytes[j];
/*
 * Check if the next not void item is VLAN or IPv4.
 * IPv6 is not supported.
 */
item = next_no_void_pattern(pattern, item);
if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* Not supported last point for range */
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
item, "Not supported last point for range");
if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
if (!(item->spec && item->mask)) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
rule->mask.vlan_tci_mask = vlan_mask->tci;
rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
/* More than one VLAN tag is not supported. */
/* Check if the next not void item is END. */
item = next_no_void_pattern(pattern, item);
if (item->type != RTE_FLOW_ITEM_TYPE_END) {
memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_ITEM,
item, "Not supported by fdir filter");
/* If the VLAN TCI mask is 0, the VLAN is ignored. */
return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct ixgbe_fdir_rule *rule,
struct rte_flow_error *error)
struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
if (hw->mac.type != ixgbe_mac_82599EB &&
hw->mac.type != ixgbe_mac_X540 &&
hw->mac.type != ixgbe_mac_X550 &&
hw->mac.type != ixgbe_mac_X550EM_x &&
hw->mac.type != ixgbe_mac_X550EM_a)
ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
actions, rule, error);
ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
actions, rule, error);
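/*
 * On 82599, a DROP rule that also matches the L4 source or
 * destination port is not supported, so reject that combination.
 */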
if (hw->mac.type == ixgbe_mac_82599EB &&
rule->fdirflags == IXGBE_FDIRCMD_DROP &&
(rule->ixgbe_fdir.formatted.src_port != 0 ||
rule->ixgbe_fdir.formatted.dst_port != 0))
if (fdir_mode == RTE_FDIR_MODE_NONE ||
fdir_mode != rule->mode)
if (rule->queue >= dev->data->nb_rx_queues)
ixgbe_filterlist_init(void)
TAILQ_INIT(&filter_ntuple_list);
TAILQ_INIT(&filter_ethertype_list);
TAILQ_INIT(&filter_syn_list);
TAILQ_INIT(&filter_fdir_list);
TAILQ_INIT(&filter_l2_tunnel_list);
TAILQ_INIT(&ixgbe_flow_list);
ixgbe_filterlist_flush(void)
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
TAILQ_REMOVE(&filter_ntuple_list,
ntuple_filter_ptr, entries);
rte_free(ntuple_filter_ptr);
while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
TAILQ_REMOVE(&filter_ethertype_list,
ethertype_filter_ptr, entries);
rte_free(ethertype_filter_ptr);
while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
TAILQ_REMOVE(&filter_syn_list,
syn_filter_ptr, entries);
rte_free(syn_filter_ptr);
while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
TAILQ_REMOVE(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
rte_free(l2_tn_filter_ptr);
while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
TAILQ_REMOVE(&filter_fdir_list,
fdir_rule_ptr, entries);
rte_free(fdir_rule_ptr);
while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
rte_free(ixgbe_flow_mem_ptr->flow);
rte_free(ixgbe_flow_mem_ptr);
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter type.
 * We will let it use the first filter type it hits, so the sequence
 * matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
struct rte_flow *flow = NULL;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
uint8_t first_mask = FALSE;
flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
PMD_DRV_LOG(ERR, "failed to allocate memory");
return (struct rte_flow *)flow;
ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
sizeof(struct ixgbe_flow_mem), 0);
if (!ixgbe_flow_mem_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
ixgbe_flow_mem_ptr->flow = flow;
TAILQ_INSERT_TAIL(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
#ifdef RTE_LIBRTE_SECURITY
/* An ESP flow is not really a flow. */
if (ntuple_filter.proto == IPPROTO_ESP)
return flow;
#endif
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
sizeof(struct ixgbe_ntuple_filter_ele), 0);
if (!ntuple_filter_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_memcpy(&ntuple_filter_ptr->filter_info,
&ntuple_filter,
sizeof(struct rte_eth_ntuple_filter));
TAILQ_INSERT_TAIL(&filter_ntuple_list,
ntuple_filter_ptr, entries);
flow->rule = ntuple_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_NTUPLE;
memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, &ethertype_filter, error);
ret = ixgbe_add_del_ethertype_filter(dev,
&ethertype_filter, TRUE);
ethertype_filter_ptr = rte_zmalloc(
"ixgbe_ethertype_filter",
sizeof(struct ixgbe_ethertype_filter_ele), 0);
if (!ethertype_filter_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_memcpy(&ethertype_filter_ptr->filter_info,
&ethertype_filter,
sizeof(struct rte_eth_ethertype_filter));
TAILQ_INSERT_TAIL(&filter_ethertype_list,
ethertype_filter_ptr, entries);
flow->rule = ethertype_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
ret = ixgbe_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
sizeof(struct ixgbe_eth_syn_filter_ele), 0);
if (!syn_filter_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_memcpy(&syn_filter_ptr->filter_info,
&syn_filter,
sizeof(struct rte_eth_syn_filter));
TAILQ_INSERT_TAIL(&filter_syn_list,
syn_filter_ptr, entries);
flow->rule = syn_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_SYN;
memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
actions, &fdir_rule, error);
/* A mask cannot be deleted. */
if (fdir_rule.b_mask) {
if (!fdir_info->mask_added) {
/* It's the first time the mask is set. */
rte_memcpy(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
fdir_info->flex_bytes_offset =
fdir_rule.flex_bytes_offset;
if (fdir_rule.mask.flex_bytes_mask)
ixgbe_fdir_set_flexbytes_offset(dev,
fdir_rule.flex_bytes_offset);
ret = ixgbe_fdir_set_input_mask(dev);
fdir_info->mask_added = TRUE;
/*
 * Only one global mask is supported;
 * all the masks should be the same.
 */
ret = memcmp(&fdir_info->mask,
&fdir_rule.mask,
sizeof(struct ixgbe_hw_fdir_mask));
if (fdir_info->flex_bytes_offset !=
fdir_rule.flex_bytes_offset)
if (fdir_rule.b_spec) {
ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
FALSE, FALSE);
fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
sizeof(struct ixgbe_fdir_rule_ele), 0);
if (!fdir_rule_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_memcpy(&fdir_rule_ptr->filter_info,
&fdir_rule,
sizeof(struct ixgbe_fdir_rule));
TAILQ_INSERT_TAIL(&filter_fdir_list,
fdir_rule_ptr, entries);
flow->rule = fdir_rule_ptr;
flow->filter_type = RTE_ETH_FILTER_FDIR;
/*
 * Clean the mask_added flag if we fail to
 * program the rule.
 */
fdir_info->mask_added = FALSE;
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
if (!l2_tn_filter_ptr) {
PMD_DRV_LOG(ERR, "failed to allocate memory");
rte_memcpy(&l2_tn_filter_ptr->filter_info,
&l2_tn_filter,
sizeof(struct rte_eth_l2_tunnel_conf));
TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
flow->rule = l2_tn_filter_ptr;
flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
rte_flow_error_set(error, -ret,
RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
"Failed to create flow.");
rte_free(ixgbe_flow_mem_ptr);
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
ixgbe_flow_validate(struct rte_eth_dev *dev,
const struct rte_flow_attr *attr,
const struct rte_flow_item pattern[],
const struct rte_flow_action actions[],
struct rte_flow_error *error)
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_fdir_rule fdir_rule;
memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
actions, &ntuple_filter, error);
memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
actions, &ethertype_filter, error);
memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
ret = ixgbe_parse_syn_filter(dev, attr, pattern,
actions, &syn_filter, error);
memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
actions, &fdir_rule, error);
memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
actions, &l2_tn_filter, error);
/* Destroy a flow rule on ixgbe. */
ixgbe_flow_destroy(struct rte_eth_dev *dev,
struct rte_flow *flow,
struct rte_flow_error *error)
struct rte_flow *pmd_flow = flow;
enum rte_filter_type filter_type = pmd_flow->filter_type;
struct rte_eth_ntuple_filter ntuple_filter;
struct rte_eth_ethertype_filter ethertype_filter;
struct rte_eth_syn_filter syn_filter;
struct ixgbe_fdir_rule fdir_rule;
struct rte_eth_l2_tunnel_conf l2_tn_filter;
struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
struct ixgbe_hw_fdir_info *fdir_info =
IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
switch (filter_type) {
case RTE_ETH_FILTER_NTUPLE:
ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
pmd_flow->rule;
rte_memcpy(&ntuple_filter,
&ntuple_filter_ptr->filter_info,
sizeof(struct rte_eth_ntuple_filter));
ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
TAILQ_REMOVE(&filter_ntuple_list,
ntuple_filter_ptr, entries);
rte_free(ntuple_filter_ptr);
case RTE_ETH_FILTER_ETHERTYPE:
ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
pmd_flow->rule;
rte_memcpy(&ethertype_filter,
&ethertype_filter_ptr->filter_info,
sizeof(struct rte_eth_ethertype_filter));
ret = ixgbe_add_del_ethertype_filter(dev,
&ethertype_filter, FALSE);
TAILQ_REMOVE(&filter_ethertype_list,
ethertype_filter_ptr, entries);
rte_free(ethertype_filter_ptr);
case RTE_ETH_FILTER_SYN:
syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
pmd_flow->rule;
rte_memcpy(&syn_filter,
&syn_filter_ptr->filter_info,
sizeof(struct rte_eth_syn_filter));
ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
TAILQ_REMOVE(&filter_syn_list,
syn_filter_ptr, entries);
rte_free(syn_filter_ptr);
case RTE_ETH_FILTER_FDIR:
fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
rte_memcpy(&fdir_rule,
&fdir_rule_ptr->filter_info,
sizeof(struct ixgbe_fdir_rule));
ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
TAILQ_REMOVE(&filter_fdir_list,
fdir_rule_ptr, entries);
rte_free(fdir_rule_ptr);
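/*
 * Once the last fdir rule is removed, allow the next created rule
 * to program a new global input mask.
 */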
if (TAILQ_EMPTY(&filter_fdir_list))
fdir_info->mask_added = false;
case RTE_ETH_FILTER_L2_TUNNEL:
l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
pmd_flow->rule;
rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
sizeof(struct rte_eth_l2_tunnel_conf));
ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
TAILQ_REMOVE(&filter_l2_tunnel_list,
l2_tn_filter_ptr, entries);
rte_free(l2_tn_filter_ptr);
default:
PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
filter_type);
rte_flow_error_set(error, EINVAL,
RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failed to destroy flow");
TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
TAILQ_REMOVE(&ixgbe_flow_list,
ixgbe_flow_mem_ptr, entries);
rte_free(ixgbe_flow_mem_ptr);
/* Destroy all flow rules associated with a port on ixgbe. */
ixgbe_flow_flush(struct rte_eth_dev *dev,
struct rte_flow_error *error)
ixgbe_clear_all_ntuple_filter(dev);
ixgbe_clear_all_ethertype_filter(dev);
ixgbe_clear_syn_filter(dev);
ret = ixgbe_clear_all_fdir_filter(dev);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failed to flush rule");
ret = ixgbe_clear_all_l2_tn_filter(dev);
rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
NULL, "Failed to flush rule");
ixgbe_filterlist_flush();
const struct rte_flow_ops ixgbe_flow_ops = {
.validate = ixgbe_flow_validate,
.create = ixgbe_flow_create,
.destroy = ixgbe_flow_destroy,
.flush = ixgbe_flow_flush,
};
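/*
 * Illustrative note (application side): these callbacks are not called
 * directly; they are reached through the generic rte_flow API, e.g.:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 *	...
 *	rte_flow_destroy(port_id, flow, &err);
 */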