/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
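/*
 * Illustrative sketch (not part of the driver): how a parsed rule typically
 * ends up on these lists. Each *_ele node above pairs a TAILQ_ENTRY() link
 * with a copy of the filter info, and ixgbe_flow_list remembers every
 * rte_flow allocation so destroy/flush can find it. The helper name and its
 * use of rte_zmalloc() here are hypothetical.
 */
static inline struct ixgbe_flow_mem *
ixgbe_flow_list_example(struct rte_flow *flow)
{
	struct ixgbe_flow_mem *node;

	/* Wrap the flow in a bookkeeping node. */
	node = rte_zmalloc("ixgbe_flow_mem", sizeof(*node), 0);
	if (node == NULL)
		return NULL;
	node->flow = flow;
	/* Append it to the global flow list. */
	TAILQ_INSERT_TAIL(&ixgbe_flow_list, node, entries);
	return node;
}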
/**
 * An endless loop will never happen with the assumptions below:
 * 1. there is at least one non-void item (END).
 * 2. cur is before END.
 */
static const struct rte_flow_item *
next_no_void_pattern(const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static const struct rte_flow_action *
next_no_void_action(const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
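/*
 * Illustrative sketch (not part of the driver): the endianness rule above
 * in practice. The port and priority values are hypothetical.
 */
static inline void
ixgbe_flow_endianness_example(struct rte_flow_item_tcp *tcp_spec,
		struct rte_flow_attr *attr)
{
	/* rte_flow_item contents are big endian (network order). */
	tcp_spec->hdr.dst_port = rte_cpu_to_be_16(80);
	/* rte_flow_attr fields use CPU order; no conversion needed. */
	attr->priority = IXGBE_MIN_N_TUPLE_PRIO;
	attr->ingress = 1;
}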
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
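/*
 * Illustrative sketch (not part of the driver): a pattern/action pair in
 * the shape cons_parse_ntuple_filter() accepts, per the layout documented
 * above. Addresses, ports and the queue index are hypothetical.
 */
static inline void
ixgbe_ntuple_pattern_example(void)
{
	static struct rte_flow_item_ipv4 ip_spec, ip_mask;
	static struct rte_flow_item_udp udp_spec, udp_mask;
	static const struct rte_flow_action_queue queue = { .index = 3 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Spec values are big endian, per the endianness note above. */
	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332); /* 192.167.3.50 */
	ip_spec.hdr.next_proto_id = IPPROTO_UDP;
	ip_mask.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	ip_mask.hdr.dst_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	ip_mask.hdr.next_proto_id = 0xFF;
	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	udp_mask.hdr.src_port = rte_cpu_to_be_16(0xFFFF);
	udp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}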
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
#ifdef RTE_LIBRTE_SECURITY
	/**
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a function specific to ixgbe, as the flags are ixgbe-specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* an ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return ret;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
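/*
 * Illustrative sketch (not part of the driver): an ethertype pattern in
 * the shape documented above, matching EtherType 0x0807 with a full mask.
 * The queue index is hypothetical.
 */
static inline void
ixgbe_ethertype_pattern_example(void)
{
	static struct rte_flow_item_eth eth_spec, eth_mask;
	static const struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Only the EtherType field carries a value; MAC masks stay zero. */
	eth_spec.type = rte_cpu_to_be_16(0x0807);
	eth_mask.type = rte_cpu_to_be_16(0xFFFF);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}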
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
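/*
 * Illustrative sketch (not part of the driver): a SYN pattern in the shape
 * documented above. Note the parser below requires the tcp_flags mask to
 * be exactly the SYN bit. The queue index is hypothetical.
 */
static inline void
ixgbe_syn_pattern_example(void)
{
	static struct rte_flow_item_tcp tcp_spec, tcp_mask;
	static const struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;	/* 0x02 */
	tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG;	/* mask must be the SYN bit */
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}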
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
				actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
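/*
 * Illustrative sketch (not part of the driver): an E-tag pattern in the
 * shape documented above. Only rsvd_grp_ecid_b is matched (GRP plus E-CID
 * base); the GRP/E-CID values and queue index are hypothetical.
 */
static inline void
ixgbe_l2_tunnel_pattern_example(void)
{
	static struct rte_flow_item_e_tag e_tag_spec, e_tag_mask;
	static const struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* GRP 0x2 in the top 2 bits, E-CID base 0x309 in the low 12. */
	e_tag_spec.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309);
	e_tag_mask.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}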
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E-CID base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
/* Search the next non-void pattern item, skipping FUZZY items. */
static const struct rte_flow_item *
next_no_fuzzy_pattern(const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		offset		12	0xFFFFFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
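/*
 * Illustrative sketch (not part of the driver): a perfect-mode flow
 * director pattern in the UDP/TCP/SCTP shape documented above, with a
 * QUEUE action followed by an optional MARK. Addresses, ports, mark id
 * and queue index are hypothetical.
 */
static inline void
ixgbe_fdir_pattern_example(void)
{
	static struct rte_flow_item_ipv4 ip_spec, ip_mask;
	static struct rte_flow_item_tcp tcp_spec, tcp_mask;
	static const struct rte_flow_action_queue queue = { .index = 5 };
	static const struct rte_flow_action_mark mark = { .id = 0xbeef };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Only src/dst addresses and L4 ports may carry a mask. */
	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114); /* 192.168.1.20 */
	ip_mask.hdr.src_addr = rte_cpu_to_be_32(0xFFFFFFFF);
	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
	tcp_mask.hdr.dst_port = rte_cpu_to_be_16(0xFFFF);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
}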
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not do anything later for the fields that are
	 * not provided.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * src MAC address must be masked,
			 * and don't support dst MAC address mask.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
						sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If neither spec nor mask is set, it means we don't
		 * care about ETH. Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must be a signature match
		 * 2. 'last' is not supported
		 * 3. mask must not be NULL
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Only the x550 family supports the SCTP port. */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
					(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		} else {
			/* On other families even the SCTP port is not supported. */
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
			    (sctp_mask->hdr.src_port ||
			     sctp_mask->hdr.dst_port ||
			     sctp_mask->hdr.tag ||
			     sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* Both the mask and the spec should be given. */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
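
/*
 * Illustrative sketch only, not part of the driver: roughly how an
 * application could hand a flex-byte rule to the parser above through the
 * generic rte_flow API. It assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT; the port id, queue index,
 * payload offset and pattern bytes are made-up example values.
 */
static __rte_unused struct rte_flow *
ixgbe_flex_byte_rule_example(uint16_t port_id, struct rte_flow_error *err)
{
	/*
	 * The RAW item of this rte_flow generation ends in a flexible
	 * pattern[] array, so spec and mask need 2 extra trailing bytes.
	 * Static storage is zero-initialized.
	 */
	static uint8_t spec_buf[sizeof(struct rte_flow_item_raw) + 2]
		__rte_aligned(4);
	static uint8_t mask_buf[sizeof(struct rte_flow_item_raw) + 2]
		__rte_aligned(4);
	struct rte_flow_item_raw *raw_spec =
		(struct rte_flow_item_raw *)spec_buf;
	struct rte_flow_item_raw *raw_mask =
		(struct rte_flow_item_raw *)mask_buf;
	/* All-zero L3/L4 masks: any address, any port. */
	static const struct rte_flow_item_ipv4 ipv4_mask;
	static const struct rte_flow_item_udp udp_mask;
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[5];
	struct rte_flow_action actions[2];

	/* Match 2 bytes at an even offset <= IXGBE_MAX_FLX_SOURCE_OFF. */
	raw_spec->offset = 12;
	raw_spec->length = 2;
	raw_spec->pattern[0] = 0xAB;
	raw_spec->pattern[1] = 0xCD;

	/* The parser above insists on an all-ones mask for every RAW field. */
	raw_mask->relative = 0x1;
	raw_mask->search = 0x1;
	raw_mask->offset = (int32_t)0xffffffff;
	raw_mask->limit = 0xffff;
	raw_mask->length = 0xffff;
	raw_mask->pattern[0] = 0xff;
	raw_mask->pattern[1] = 0xff;

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[1].mask = &ipv4_mask;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[2].mask = &udp_mask;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_RAW;
	pattern[3].spec = raw_spec;
	pattern[3].mask = raw_mask;
	pattern[4].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}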
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI is a 24-bit field. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no VLAN item is given, the TCI is considered fully masked. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);

		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/**
	 * If the TCI is 0, the VLAN is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
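
/*
 * Illustrative sketch only, not part of the driver: a VxLAN rule matching
 * the pattern documented above, built through the generic rte_flow API.
 * It assumes the port was configured with
 * fdir_conf.mode = RTE_FDIR_MODE_PERFECT_TUNNEL; the port id, queue index,
 * VNI, inner MAC and VLAN TCI are made-up example values.
 */
static __rte_unused struct rte_flow *
ixgbe_vxlan_rule_example(uint16_t port_id, struct rte_flow_error *err)
{
	static const struct rte_flow_item_vxlan vxlan_spec = {
		.vni = { 0x00, 0x32, 0x54 },
	};
	static const struct rte_flow_item_vxlan vxlan_mask = {
		.vni = { 0xFF, 0xFF, 0xFF },
	};
	/* Inner MAC: only the dst address may be matched, per byte. */
	static const struct rte_flow_item_eth inner_eth_spec = {
		.dst = { .addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } },
	};
	static const struct rte_flow_item_eth inner_eth_mask = {
		.dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
	};
	static const struct rte_flow_item_vlan vlan_spec = {
		.tci = RTE_BE16(0x2016),
	};
	static const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0xEFFF),
	};
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_item pattern[7];
	struct rte_flow_action actions[2];

	memset(pattern, 0, sizeof(pattern));
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;	/* outer, no spec/mask */
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	pattern[3].spec = &vxlan_spec;
	pattern[3].mask = &vxlan_mask;
	pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;	/* inner MAC */
	pattern[4].spec = &inner_eth_spec;
	pattern[4].mask = &inner_eth_mask;
	pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
	pattern[5].spec = &vlan_spec;
	pattern[5].mask = &vlan_mask;
	pattern[6].type = RTE_FLOW_ITEM_TYPE_END;

	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, err);
}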

static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);
	if (ret)
		return ret;

step_next:

	/* On 82599, a drop rule with an L4 port match is not supported. */
	if (hw->mac.type == ixgbe_mac_82599EB &&
	    rule->fdirflags == IXGBE_FDIRCMD_DROP &&
	    (rule->ixgbe_fdir.formatted.src_port != 0 ||
	     rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}

void
ixgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&ixgbe_flow_list);
}

void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr, entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr, entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr, entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr, entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr, entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			     ixgbe_flow_mem_ptr, entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return NULL;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
			  ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);

#ifdef RTE_LIBRTE_SECURITY
	/* An ESP flow is not really a flow. */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
2835 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2836 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2837 actions, ðertype_filter, error);
2839 ret = ixgbe_add_del_ethertype_filter(dev,
2840 ðertype_filter, TRUE);
2842 ethertype_filter_ptr = rte_zmalloc(
2843 "ixgbe_ethertype_filter",
2844 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2845 if (!ethertype_filter_ptr) {
2846 PMD_DRV_LOG(ERR, "failed to allocate memory");
2849 rte_memcpy(ðertype_filter_ptr->filter_info,
2851 sizeof(struct rte_eth_ethertype_filter));
2852 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2853 ethertype_filter_ptr, entries);
2854 flow->rule = ethertype_filter_ptr;
2855 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr, entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR,
						"failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * Clear the mask_added flag if we failed
				 * to program the rule.
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, as there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}

/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
			&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}

/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}

const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
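
/*
 * Illustrative sketch only, not part of the driver: the ops table above is
 * what applications reach through the public rte_flow entry points (in this
 * DPDK generation the ethdev layer looks the table up through the driver's
 * filter_ctrl hook with RTE_ETH_FILTER_GENERIC). A typical caller validates
 * a rule before creating it and flushes the port on teardown; port_id,
 * pattern and actions are placeholders here.
 */
static __rte_unused int
ixgbe_flow_usage_example(uint16_t port_id,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[])
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_error err;
	struct rte_flow *flow;

	/* Dry run: ends up in ixgbe_flow_validate(). */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) < 0)
		return -1;

	/* Program the rule: ends up in ixgbe_flow_create(). */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (!flow)
		return -1;

	/* Remove this rule: ends up in ixgbe_flow_destroy(). */
	if (rte_flow_destroy(port_id, flow, &err) < 0)
		return -1;

	/* Drop every remaining rule on the port: ixgbe_flow_flush(). */
	return rte_flow_flush(port_id, &err);
}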