/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
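
/*
 * Illustrative sketch (not part of the driver code): these TAILQ lists
 * track every filter the flow API has accepted, so flush/destroy can walk
 * them later. With the sys/queue.h macros, tracking a newly accepted
 * ntuple filter looks roughly like the following; "ele" and the
 * surrounding steps are hypothetical:
 *
 *	struct ixgbe_ntuple_filter_ele *ele;
 *
 *	TAILQ_INIT(&filter_ntuple_list);	// once, at init time
 *	ele = rte_zmalloc("ixgbe_ntuple", sizeof(*ele), 0);
 *	rte_memcpy(&ele->filter_info, &ntuple_filter, sizeof(ntuple_filter));
 *	TAILQ_INSERT_TAIL(&filter_ntuple_list, ele, entries);
 *
 *	TAILQ_FOREACH(ele, &filter_ntuple_list, entries)
 *		;	// inspect ele->filter_info
 */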

/**
 * An endless loop cannot happen given the assumptions below:
 * 1. there is at least one no-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
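
/*
 * Illustrative sketch (not part of the driver code): VOID items may be
 * interleaved anywhere in a pattern, so the parsers never index the array
 * directly. For a hypothetical pattern like the one below, successive
 * next_no_void_pattern() calls return the ETH, IPV4 and END items:
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VOID },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_item *it;
 *	it = next_no_void_pattern(pattern, NULL);	// ETH
 *	it = next_no_void_pattern(pattern, it);		// IPV4
 */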

/**
 * Please be aware of an assumption shared by all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern describes the packets themselves,
 * it normally uses network (big endian) order.
 */
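
/*
 * Illustrative sketch (not part of the driver code): since item contents
 * are network order while attributes are CPU order, an application must
 * byte-swap the packet fields it matches on. Values here are hypothetical:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };	// CPU order
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),	// big endian, as on the wire
 *	};
 */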

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 *  Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}
	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and TCP flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			item, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
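
/*
 * Illustrative sketch (not part of the driver code): a flow that
 * cons_parse_ntuple_filter() accepts, matching the pattern example in the
 * comment above. All values are hypothetical; item fields are converted
 * to network order, the attributes stay in CPU order:
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(IPv4(192, 168, 1, 20)),
 *		.hdr.dst_addr = rte_cpu_to_be_32(IPv4(192, 167, 3, 50)),
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *		.hdr.dst_addr = UINT32_MAX,
 *		.hdr.next_proto_id = UINT8_MAX,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = UINT16_MAX,
 *		.hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */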

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

	if (ret)
		return ret;

	/* An ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;

	/* Ixgbe doesn't support TCP flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */

	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
		filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}
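
/*
 * Illustrative sketch (not part of the driver code): the minimal TCP SYN
 * rule this parser accepts. Only the SYN bit may be set in both spec and
 * mask; every other TCP field must stay zero:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,	// 0x02
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr.tcp_flags = TCP_SYN_FLAG,	// mask must equal TCP_SYN_FLAG
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */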
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_syn_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E-CID base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * The e-tag id is taken as little endian by the HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->pool = act_q->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
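
/*
 * Illustrative sketch (not part of the driver code): an E-tag rule this
 * parser accepts, using the hypothetical GRP/E-CID values from the comment
 * table above. Assuming the 14 matched bits of rsvd_grp_ecid_b are GRP
 * (bits 13:12) plus e_cid_base (bits 11:0), the mask must be exactly
 * 0x3FFF in network order:
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */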
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	ret = cons_parse_l2_tn_filter(attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
		return -rte_errno;

	return ret;
}

/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
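
/*
 * Illustrative sketch (not part of the driver code): the action list shape
 * this function accepts, QUEUE (or DROP) optionally followed by MARK.
 * Queue index and mark_id are hypothetical:
 *
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */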

/* search the next no-void pattern, skipping fuzzy items */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
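
/*
 * Illustrative sketch (not part of the driver code): a FUZZY item with a
 * non-zero threshold is how an application requests signature mode; the
 * threshold values are hypothetical:
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xFFFFFFFF };
 *
 *	// Placed anywhere before END, this makes signature_match() return 1:
 *	// { .type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *	//   .spec = &fuzzy_spec, .mask = &fuzzy_mask },
 */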

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not do anything for the not-provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * The src MAC address must be masked,
			 * and a dst MAC address mask is not supported.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
					eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/**
		 * If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must be a signature match
		 * 2. "last" is not supported
		 * 3. mask must not be NULL
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Only the x550 family supports the SCTP port. */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
				sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other families, even the SCTP port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
				(sctp_mask->hdr.src_port ||
				 sctp_mask->hdr.dst_port ||
				 sctp_mask->hdr.tag ||
				 sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
			item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be null */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
2145 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2146 /* check if the next not void item is END */
2147 item = next_no_fuzzy_pattern(pattern, item);
2148 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2149 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2150 rte_flow_error_set(error, EINVAL,
2151 RTE_FLOW_ERROR_TYPE_ITEM,
2152 item, "Not supported by fdir filter");
2157 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
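/*
 * Illustrative sketch (not part of the driver): one way an application
 * could build a RAW item that passes the flex-byte checks above --
 * relative/search/limit zeroed in the spec and fully set in the mask, an
 * even offset no larger than IXGBE_MAX_FLX_SOURCE_OFF, a length of exactly
 * 2, and both pattern bytes fully masked. This assumes the DPDK generation
 * where rte_flow_item_raw ends in a flexible pattern[] array, so the
 * caller must provide spec/mask storage with two bytes of trailing room.
 * All "example_" identifiers are hypothetical.
 */
static inline void
example_build_flex_raw_item(struct rte_flow_item *item,
                struct rte_flow_item_raw *spec,
                struct rte_flow_item_raw *mask,
                uint8_t byte0, uint8_t byte1)
{
    memset(spec, 0, sizeof(*spec) + 2);
    memset(mask, 0, sizeof(*mask) + 2);

    /* Spec: absolute match of exactly two bytes at an even offset. */
    spec->relative = 0;
    spec->search = 0;
    spec->offset = 12;          /* even, <= IXGBE_MAX_FLX_SOURCE_OFF */
    spec->limit = 0;
    spec->length = 2;           /* exactly two flex bytes */
    spec->pattern[0] = byte0;   /* the pair must not be 0xff 0xff */
    spec->pattern[1] = byte1;

    /* Mask: every field the driver checks must be fully set. */
    mask->relative = 1;
    mask->search = 1;
    mask->offset = (int32_t)0xffffffff;
    mask->limit = 0xffff;
    mask->length = 0xffff;
    mask->pattern[0] = 0xff;
    mask->pattern[1] = 0xff;

    item->type = RTE_FLOW_ITEM_TYPE_RAW;
    item->spec = spec;
    item->mask = mask;
    item->last = NULL;          /* ranges are rejected */
}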
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
 * and collect the flow director filter info along the way.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * All other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct ixgbe_fdir_rule *rule,
                struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_vxlan *vxlan_spec;
    const struct rte_flow_item_vxlan *vxlan_mask;
    const struct rte_flow_item_nvgre *nvgre_spec;
    const struct rte_flow_item_nvgre *nvgre_mask;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_vlan *vlan_spec;
    const struct rte_flow_item_vlan *vlan_mask;
    uint32_t j;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }
    /*
     * Some fields may not be provided. Set spec to 0 and mask to the
     * default value, so we need not handle the missing fields later.
     */
    memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
    memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
    rule->mask.vlan_tci_mask = 0;

    /*
     * The first not-void item should be
     * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
     */
    item = next_no_void_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
        item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
    /* Skip the outer MAC item. */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported: "last" for ranges. */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not-void item is IPv4 or IPv6. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Skip the outer IP item. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
        item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported: "last" for ranges. */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not-void item is UDP or NVGRE. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Skip the UDP item. */
    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        /* Only used to describe the protocol stack. */
        if (item->spec || item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported: "last" for ranges. */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* Check if the next not-void item is VxLAN. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the VxLAN info */
    if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
        rule->ixgbe_fdir.formatted.tunnel_type =
            RTE_FDIR_TUNNEL_TYPE_VXLAN;

        /* Only care about VNI, others should be masked. */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported: "last" for ranges. */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        rule->b_mask = TRUE;

        /* Tunnel type is always meaningful. */
        rule->mask.tunnel_type_mask = 1;

        vxlan_mask =
            (const struct rte_flow_item_vxlan *)item->mask;
        if (vxlan_mask->flags) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* VNI must be totally masked or not at all. */
        if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
             vxlan_mask->vni[2]) &&
            ((vxlan_mask->vni[0] != 0xFF) ||
             (vxlan_mask->vni[1] != 0xFF) ||
             (vxlan_mask->vni[2] != 0xFF))) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
            RTE_DIM(vxlan_mask->vni));

        if (item->spec) {
            rule->b_spec = TRUE;
            vxlan_spec = (const struct rte_flow_item_vxlan *)
                    item->spec;
            rte_memcpy(((uint8_t *)
                &rule->ixgbe_fdir.formatted.tni_vni + 1),
                vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
            rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
                rule->ixgbe_fdir.formatted.tni_vni);
        }
    }
    /* Get the NVGRE info */
    if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
        rule->ixgbe_fdir.formatted.tunnel_type =
            RTE_FDIR_TUNNEL_TYPE_NVGRE;

        /*
         * Only care about flags0, flags1, protocol and TNI,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* Not supported: "last" for ranges. */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        rule->b_mask = TRUE;

        /* Tunnel type is always meaningful. */
        rule->mask.tunnel_type_mask = 1;

        nvgre_mask =
            (const struct rte_flow_item_nvgre *)item->mask;
        if (nvgre_mask->flow_id) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        if (nvgre_mask->c_k_s_rsvd0_ver !=
            rte_cpu_to_be_16(0x3000) ||
            nvgre_mask->protocol != 0xFFFF) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* TNI must be totally masked or not at all. */
        if (nvgre_mask->tni[0] &&
            ((nvgre_mask->tni[0] != 0xFF) ||
             (nvgre_mask->tni[1] != 0xFF) ||
             (nvgre_mask->tni[2] != 0xFF))) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        /* tni is a 24-bit field */
        rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
            RTE_DIM(nvgre_mask->tni));
        rule->mask.tunnel_id_mask <<= 8;

        if (item->spec) {
            rule->b_spec = TRUE;
            nvgre_spec =
                (const struct rte_flow_item_nvgre *)item->spec;
            if (nvgre_spec->c_k_s_rsvd0_ver !=
                rte_cpu_to_be_16(0x2000) ||
                nvgre_spec->protocol !=
                rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            /* tni is a 24-bit field */
            rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
                nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
            rule->ixgbe_fdir.formatted.tni_vni <<= 8;
        }
    }
    /* Check if the next not-void item is the inner MAC. */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    /*
     * Only VLAN and dst MAC address are supported;
     * others should be masked.
     */
    if (!item->mask) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }
    /* Not supported: "last" for ranges. */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }
    rule->b_mask = TRUE;
    eth_mask = (const struct rte_flow_item_eth *)item->mask;

    /* Ether type should be masked. */
    if (eth_mask->type) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    /* src MAC address should be masked. */
    for (j = 0; j < ETHER_ADDR_LEN; j++) {
        if (eth_mask->src.addr_bytes[j]) {
            memset(rule, 0,
                sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    rule->mask.mac_addr_byte_mask = 0;
    for (j = 0; j < ETHER_ADDR_LEN; j++) {
        /* It's a per-byte mask. */
        if (eth_mask->dst.addr_bytes[j] == 0xFF) {
            rule->mask.mac_addr_byte_mask |= 0x1 << j;
        } else if (eth_mask->dst.addr_bytes[j]) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* When there is no VLAN, treat it as a full mask. */
    rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

    if (item->spec) {
        rule->b_spec = TRUE;
        eth_spec = (const struct rte_flow_item_eth *)item->spec;

        /* Get the dst MAC. */
        for (j = 0; j < ETHER_ADDR_LEN; j++) {
            rule->ixgbe_fdir.formatted.inner_mac[j] =
                eth_spec->dst.addr_bytes[j];
        }
    }
    /*
     * Check if the next not-void item is VLAN or IPv4.
     * IPv6 is not supported.
     */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_VLAN &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }
    /* Not supported: "last" for ranges. */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        if (!(item->spec && item->mask)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
        vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

        rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

        rule->mask.vlan_tci_mask = vlan_mask->tci;
        rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
        /* More than one VLAN tag is not supported. */

        /* Check if the next not-void item is END. */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /*
     * If there is no VLAN tag, the VLAN is a don't-care.
     * Do nothing.
     */

    return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
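/*
 * Illustrative sketch (not part of the driver): a VxLAN flow director
 * pattern matching what the parser above accepts and the doc-comment
 * example shows -- ETH/IPv4/UDP items used only as protocol markers (no
 * spec/mask), a VxLAN item with a fully masked VNI, an inner ETH item
 * matching the destination MAC per byte, and an inner VLAN TCI. Field
 * names follow the rte_flow item layouts this file already relies on;
 * the "example_" identifiers are hypothetical.
 */
static inline void
example_build_vxlan_fdir_pattern(struct rte_flow_item pattern[7],
                struct rte_flow_item_vxlan *vxlan_spec,
                struct rte_flow_item_vxlan *vxlan_mask,
                struct rte_flow_item_eth *eth_spec,
                struct rte_flow_item_eth *eth_mask,
                struct rte_flow_item_vlan *vlan_spec,
                struct rte_flow_item_vlan *vlan_mask)
{
    unsigned int i;

    /* VxLAN: the VNI must be fully masked (or fully wild). */
    memset(vxlan_spec, 0, sizeof(*vxlan_spec));
    memset(vxlan_mask, 0, sizeof(*vxlan_mask));
    vxlan_spec->vni[0] = 0x00;
    vxlan_spec->vni[1] = 0x32;
    vxlan_spec->vni[2] = 0x54;
    memset(vxlan_mask->vni, 0xFF, sizeof(vxlan_mask->vni));

    /* Inner MAC: per-byte dst mask; src and ether type must stay 0. */
    memset(eth_spec, 0, sizeof(*eth_spec));
    memset(eth_mask, 0, sizeof(*eth_mask));
    for (i = 0; i < ETHER_ADDR_LEN; i++) {
        eth_spec->dst.addr_bytes[i] = 0x02 + i; /* example MAC */
        eth_mask->dst.addr_bytes[i] = 0xFF;
    }

    /* Inner VLAN TCI; the PMD masks out the priority bits itself. */
    memset(vlan_spec, 0, sizeof(*vlan_spec));
    memset(vlan_mask, 0, sizeof(*vlan_mask));
    vlan_spec->tci = rte_cpu_to_be_16(0x2016);
    vlan_mask->tci = rte_cpu_to_be_16(0xEFFF);

    memset(pattern, 0, 7 * sizeof(*pattern));
    pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;   /* protocol marker */
    pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;  /* protocol marker */
    pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;   /* protocol marker */
    pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
    pattern[3].spec = vxlan_spec;
    pattern[3].mask = vxlan_mask;
    pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;   /* inner MAC */
    pattern[4].spec = eth_spec;
    pattern[4].mask = eth_mask;
    pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
    pattern[5].spec = vlan_spec;
    pattern[5].mask = vlan_mask;
    pattern[6].type = RTE_FLOW_ITEM_TYPE_END;
}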
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

    if (hw->mac.type != ixgbe_mac_82599EB &&
        hw->mac.type != ixgbe_mac_X540 &&
        hw->mac.type != ixgbe_mac_X550 &&
        hw->mac.type != ixgbe_mac_X550EM_x &&
        hw->mac.type != ixgbe_mac_X550EM_a)
        return -ENOTSUP;

    ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
                    actions, rule, error);
    if (!ret)
        goto step_next;

    ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
                    actions, rule, error);
    if (ret)
        return ret;

step_next:
    /* A drop rule with L4 ports is not supported on 82599. */
    if (hw->mac.type == ixgbe_mac_82599EB &&
        rule->fdirflags == IXGBE_FDIRCMD_DROP &&
        (rule->ixgbe_fdir.formatted.src_port != 0 ||
         rule->ixgbe_fdir.formatted.dst_port != 0))
        return -ENOTSUP;

    if (fdir_mode == RTE_FDIR_MODE_NONE ||
        fdir_mode != rule->mode)
        return -ENOTSUP;

    if (rule->queue >= dev->data->nb_rx_queues)
        return -ENOTSUP;

    return ret;
}
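/*
 * Illustrative sketch (not part of the driver): the parser above rejects
 * any rule whose mode differs from the port's configured fdir mode, so an
 * application normally selects that mode up front, before
 * rte_eth_dev_configure(). A minimal sketch, assuming the usual ethdev
 * headers are in scope and that port_id and the queue counts are set up
 * elsewhere; the function name is hypothetical.
 */
static inline int
example_configure_fdir_perfect_mode(uint16_t port_id,
                    uint16_t nb_rxq, uint16_t nb_txq)
{
    struct rte_eth_conf port_conf;

    memset(&port_conf, 0, sizeof(port_conf));
    /* Plain (non-tunnel) fdir rules require perfect mode; ... */
    port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
    /* ...tunnel rules would need RTE_FDIR_MODE_PERFECT_TUNNEL instead. */
    port_conf.fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;

    return rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
}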
void
ixgbe_filterlist_init(void)
{
    TAILQ_INIT(&filter_ntuple_list);
    TAILQ_INIT(&filter_ethertype_list);
    TAILQ_INIT(&filter_syn_list);
    TAILQ_INIT(&filter_fdir_list);
    TAILQ_INIT(&filter_l2_tunnel_list);
    TAILQ_INIT(&ixgbe_flow_list);
}

void
ixgbe_filterlist_flush(void)
{
    struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
    struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
    struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
    struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
    struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
    struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

    while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
        TAILQ_REMOVE(&filter_ntuple_list,
            ntuple_filter_ptr, entries);
        rte_free(ntuple_filter_ptr);
    }

    while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
        TAILQ_REMOVE(&filter_ethertype_list,
            ethertype_filter_ptr, entries);
        rte_free(ethertype_filter_ptr);
    }

    while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
        TAILQ_REMOVE(&filter_syn_list,
            syn_filter_ptr, entries);
        rte_free(syn_filter_ptr);
    }

    while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
        TAILQ_REMOVE(&filter_l2_tunnel_list,
            l2_tn_filter_ptr, entries);
        rte_free(l2_tn_filter_ptr);
    }

    while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
        TAILQ_REMOVE(&filter_fdir_list,
            fdir_rule_ptr, entries);
        rte_free(fdir_rule_ptr);
    }

    while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
        TAILQ_REMOVE(&ixgbe_flow_list,
            ixgbe_flow_mem_ptr, entries);
        rte_free(ixgbe_flow_mem_ptr->flow);
        rte_free(ixgbe_flow_mem_ptr);
    }
}
/*
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one kind of filter.
 * We let it use the first filter type it hits,
 * so the order of the parsers below matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    int ret;
    struct rte_eth_ntuple_filter ntuple_filter;
    struct rte_eth_ethertype_filter ethertype_filter;
    struct rte_eth_syn_filter syn_filter;
    struct ixgbe_fdir_rule fdir_rule;
    struct rte_eth_l2_tunnel_conf l2_tn_filter;
    struct ixgbe_hw_fdir_info *fdir_info =
        IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
    struct rte_flow *flow = NULL;
    struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
    struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
    struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
    struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
    struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
    struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
    uint8_t first_mask = FALSE;

    flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
    if (!flow) {
        PMD_DRV_LOG(ERR, "failed to allocate memory");
        return (struct rte_flow *)flow;
    }
    ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
            sizeof(struct ixgbe_flow_mem), 0);
    if (!ixgbe_flow_mem_ptr) {
        PMD_DRV_LOG(ERR, "failed to allocate memory");
        rte_free(flow);
        return NULL;
    }
    ixgbe_flow_mem_ptr->flow = flow;
    TAILQ_INSERT_TAIL(&ixgbe_flow_list,
        ixgbe_flow_mem_ptr, entries);
    memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
    ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
            actions, &ntuple_filter, error);

    /* An ESP flow is not really a flow. */
    if (ntuple_filter.proto == IPPROTO_ESP)
        return flow;

    if (!ret) {
        ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
        if (!ret) {
            ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
                sizeof(struct ixgbe_ntuple_filter_ele), 0);
            if (!ntuple_filter_ptr) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                goto out;
            }
            rte_memcpy(&ntuple_filter_ptr->filter_info,
                &ntuple_filter,
                sizeof(struct rte_eth_ntuple_filter));
            TAILQ_INSERT_TAIL(&filter_ntuple_list,
                ntuple_filter_ptr, entries);
            flow->rule = ntuple_filter_ptr;
            flow->filter_type = RTE_ETH_FILTER_NTUPLE;
            return flow;
        }
        goto out;
    }
2829 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
2830 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
2831 actions, ðertype_filter, error);
2833 ret = ixgbe_add_del_ethertype_filter(dev,
2834 ðertype_filter, TRUE);
2836 ethertype_filter_ptr = rte_zmalloc(
2837 "ixgbe_ethertype_filter",
2838 sizeof(struct ixgbe_ethertype_filter_ele), 0);
2839 if (!ethertype_filter_ptr) {
2840 PMD_DRV_LOG(ERR, "failed to allocate memory");
2843 rte_memcpy(ðertype_filter_ptr->filter_info,
2845 sizeof(struct rte_eth_ethertype_filter));
2846 TAILQ_INSERT_TAIL(&filter_ethertype_list,
2847 ethertype_filter_ptr, entries);
2848 flow->rule = ethertype_filter_ptr;
2849 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
    memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
    ret = ixgbe_parse_syn_filter(dev, attr, pattern,
                actions, &syn_filter, error);
    if (!ret) {
        ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
        if (!ret) {
            syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
                sizeof(struct ixgbe_eth_syn_filter_ele), 0);
            if (!syn_filter_ptr) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                goto out;
            }
            rte_memcpy(&syn_filter_ptr->filter_info,
                &syn_filter,
                sizeof(struct rte_eth_syn_filter));
            TAILQ_INSERT_TAIL(&filter_syn_list,
                syn_filter_ptr, entries);
            flow->rule = syn_filter_ptr;
            flow->filter_type = RTE_ETH_FILTER_SYN;
            return flow;
        }
        goto out;
    }
    memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
    ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
                actions, &fdir_rule, error);
    if (!ret) {
        /* A mask cannot be deleted. */
        if (fdir_rule.b_mask) {
            if (!fdir_info->mask_added) {
                /* It's the first time the mask is set. */
                rte_memcpy(&fdir_info->mask,
                    &fdir_rule.mask,
                    sizeof(struct ixgbe_hw_fdir_mask));
                fdir_info->flex_bytes_offset =
                    fdir_rule.flex_bytes_offset;

                if (fdir_rule.mask.flex_bytes_mask)
                    ixgbe_fdir_set_flexbytes_offset(dev,
                        fdir_rule.flex_bytes_offset);

                ret = ixgbe_fdir_set_input_mask(dev);
                if (ret)
                    goto out;

                fdir_info->mask_added = TRUE;
                first_mask = TRUE;
            } else {
                /*
                 * Only one global mask is supported,
                 * so all the masks should be the same.
                 */
                ret = memcmp(&fdir_info->mask,
                    &fdir_rule.mask,
                    sizeof(struct ixgbe_hw_fdir_mask));
                if (ret)
                    goto out;

                if (fdir_info->flex_bytes_offset !=
                        fdir_rule.flex_bytes_offset)
                    goto out;
            }
        }

        if (fdir_rule.b_spec) {
            ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
                    FALSE, FALSE);
            if (!ret) {
                fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
                    sizeof(struct ixgbe_fdir_rule_ele), 0);
                if (!fdir_rule_ptr) {
                    PMD_DRV_LOG(ERR, "failed to allocate memory");
                    goto out;
                }
                rte_memcpy(&fdir_rule_ptr->filter_info,
                    &fdir_rule,
                    sizeof(struct ixgbe_fdir_rule));
                TAILQ_INSERT_TAIL(&filter_fdir_list,
                    fdir_rule_ptr, entries);
                flow->rule = fdir_rule_ptr;
                flow->filter_type = RTE_ETH_FILTER_FDIR;
                return flow;
            }

            if (ret) {
                /*
                 * Clear the mask_added flag if we failed
                 * to program the rule.
                 */
                if (first_mask)
                    fdir_info->mask_added = FALSE;
                goto out;
            }
        }

        goto out;
    }
    memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
    ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
                actions, &l2_tn_filter, error);
    if (!ret) {
        ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
        if (!ret) {
            l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
                sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
            if (!l2_tn_filter_ptr) {
                PMD_DRV_LOG(ERR, "failed to allocate memory");
                goto out;
            }
            rte_memcpy(&l2_tn_filter_ptr->filter_info,
                &l2_tn_filter,
                sizeof(struct rte_eth_l2_tunnel_conf));
            TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
                l2_tn_filter_ptr, entries);
            flow->rule = l2_tn_filter_ptr;
            flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
            return flow;
        }
    }

out:
    TAILQ_REMOVE(&ixgbe_flow_list,
        ixgbe_flow_mem_ptr, entries);
    rte_flow_error_set(error, -ret,
            RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
            "Failed to create flow.");
    rte_free(ixgbe_flow_mem_ptr);
    rte_free(flow);
    return NULL;
}
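/*
 * Usage note (illustrative, not part of the driver): the flow director
 * block keeps one global mask per port, so every fdir rule must carry the
 * same mask and flex-byte offset. The first rule programs the mask; any
 * later rule whose mask differs fails the memcmp() check above and lands
 * in the "Failed to create flow." error path. Applications should
 * therefore vary only the spec between fdir rules and keep the mask
 * constant.
 */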
/*
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it doesn't guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
        const struct rte_flow_attr *attr,
        const struct rte_flow_item pattern[],
        const struct rte_flow_action actions[],
        struct rte_flow_error *error)
{
    struct rte_eth_ntuple_filter ntuple_filter;
    struct rte_eth_ethertype_filter ethertype_filter;
    struct rte_eth_syn_filter syn_filter;
    struct rte_eth_l2_tunnel_conf l2_tn_filter;
    struct ixgbe_fdir_rule fdir_rule;
    int ret;

    memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
    ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
                actions, &ntuple_filter, error);
    if (!ret)
        return 0;

    memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
    ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
                actions, &ethertype_filter, error);
    if (!ret)
        return 0;

    memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
    ret = ixgbe_parse_syn_filter(dev, attr, pattern,
                actions, &syn_filter, error);
    if (!ret)
        return 0;

    memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
    ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
                actions, &fdir_rule, error);
    if (!ret)
        return 0;

    memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
    ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
                actions, &l2_tn_filter, error);

    return ret;
}
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
        struct rte_flow *flow,
        struct rte_flow_error *error)
{
    int ret;
    struct rte_flow *pmd_flow = flow;
    enum rte_filter_type filter_type = pmd_flow->filter_type;
    struct rte_eth_ntuple_filter ntuple_filter;
    struct rte_eth_ethertype_filter ethertype_filter;
    struct rte_eth_syn_filter syn_filter;
    struct ixgbe_fdir_rule fdir_rule;
    struct rte_eth_l2_tunnel_conf l2_tn_filter;
    struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
    struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
    struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
    struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
    struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
    struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
    struct ixgbe_hw_fdir_info *fdir_info =
        IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

    switch (filter_type) {
    case RTE_ETH_FILTER_NTUPLE:
        ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
                    pmd_flow->rule;
        rte_memcpy(&ntuple_filter,
            &ntuple_filter_ptr->filter_info,
            sizeof(struct rte_eth_ntuple_filter));
        ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
        if (!ret) {
            TAILQ_REMOVE(&filter_ntuple_list,
                ntuple_filter_ptr, entries);
            rte_free(ntuple_filter_ptr);
        }
        break;
    case RTE_ETH_FILTER_ETHERTYPE:
        ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
                    pmd_flow->rule;
        rte_memcpy(&ethertype_filter,
            &ethertype_filter_ptr->filter_info,
            sizeof(struct rte_eth_ethertype_filter));
        ret = ixgbe_add_del_ethertype_filter(dev,
                &ethertype_filter, FALSE);
        if (!ret) {
            TAILQ_REMOVE(&filter_ethertype_list,
                ethertype_filter_ptr, entries);
            rte_free(ethertype_filter_ptr);
        }
        break;
    case RTE_ETH_FILTER_SYN:
        syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
                    pmd_flow->rule;
        rte_memcpy(&syn_filter,
            &syn_filter_ptr->filter_info,
            sizeof(struct rte_eth_syn_filter));
        ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
        if (!ret) {
            TAILQ_REMOVE(&filter_syn_list,
                syn_filter_ptr, entries);
            rte_free(syn_filter_ptr);
        }
        break;
    case RTE_ETH_FILTER_FDIR:
        fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
        rte_memcpy(&fdir_rule,
            &fdir_rule_ptr->filter_info,
            sizeof(struct ixgbe_fdir_rule));
        ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
        if (!ret) {
            TAILQ_REMOVE(&filter_fdir_list,
                fdir_rule_ptr, entries);
            rte_free(fdir_rule_ptr);
            if (TAILQ_EMPTY(&filter_fdir_list))
                fdir_info->mask_added = false;
        }
        break;
    case RTE_ETH_FILTER_L2_TUNNEL:
        l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
                    pmd_flow->rule;
        rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
            sizeof(struct rte_eth_l2_tunnel_conf));
        ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
        if (!ret) {
            TAILQ_REMOVE(&filter_l2_tunnel_list,
                l2_tn_filter_ptr, entries);
            rte_free(l2_tn_filter_ptr);
        }
        break;
    default:
        PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
                filter_type);
        ret = -EINVAL;
        break;
    }

    if (ret) {
        rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL, "Failed to destroy flow");
        return ret;
    }

    TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
        if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
            TAILQ_REMOVE(&ixgbe_flow_list,
                ixgbe_flow_mem_ptr, entries);
            rte_free(ixgbe_flow_mem_ptr);
        }
    }
    rte_free(flow);

    return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
        struct rte_flow_error *error)
{
    int ret = 0;

    ixgbe_clear_all_ntuple_filter(dev);
    ixgbe_clear_all_ethertype_filter(dev);
    ixgbe_clear_syn_filter(dev);

    ret = ixgbe_clear_all_fdir_filter(dev);
    if (ret < 0) {
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL, "Failed to flush rule");
        return ret;
    }

    ret = ixgbe_clear_all_l2_tn_filter(dev);
    if (ret < 0) {
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
                NULL, "Failed to flush rule");
        return ret;
    }

    ixgbe_filterlist_flush();

    return 0;
}
const struct rte_flow_ops ixgbe_flow_ops = {
    .validate = ixgbe_flow_validate,
    .create = ixgbe_flow_create,
    .destroy = ixgbe_flow_destroy,
    .flush = ixgbe_flow_flush,
};
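/*
 * Illustrative usage sketch (not part of the driver): these callbacks are
 * reached through the generic rte_flow API, never called directly. A
 * typical application validates a rule first and only then creates it.
 * `attr`, `pattern` and `actions` are assumed to be built elsewhere (e.g.
 * with the VxLAN pattern helper sketched earlier); the port_id width
 * follows your DPDK release, and the function name is hypothetical.
 */
static inline int
example_validate_then_create(uint16_t port_id,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[])
{
    struct rte_flow_error err;
    struct rte_flow *f;
    int ret;

    /* Dispatches to ixgbe_flow_validate() on an ixgbe port. */
    ret = rte_flow_validate(port_id, attr, pattern, actions, &err);
    if (ret)
        return ret;

    /* Dispatches to ixgbe_flow_create(). */
    f = rte_flow_create(port_id, attr, pattern, actions, &err);
    if (!f)
        return -rte_errno;

    /* Tear the rule down again; dispatches to ixgbe_flow_destroy(). */
    return rte_flow_destroy(port_id, f, &err);
}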