/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
/**
 * An endless loop cannot happen here, given two assumptions:
 * 1. there is at least one non-void item (END) in the array;
 * 2. cur points before that END item.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}
static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];
	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/**
 * Please be aware that all the parsers share an assumption:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * This is because the pattern describes packets, and packets
 * normally use network (big endian) order.
 */
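/*
 * Illustrative sketch (comment only, not driver code): following the
 * assumption above, numeric fields in an item spec are given in network
 * order, while action and attribute fields stay in CPU order:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr.dst_port = rte_cpu_to_be_16(80),	// big endian
 *	};
 *	struct rte_flow_action_queue queue = {
 *		.index = 1,				// CPU order
 *	};
 */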
/**
 * Parse the rule to see if it is an n-tuple rule,
 * and extract the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
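/*
 * Illustrative sketch (comment only, not driver code): an n-tuple rule
 * matching the documented pattern could be built like this; the queue
 * index and addresses are example values.
 *
 *	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), // 192.167.3.50
 *		.hdr.next_proto_id = IPPROTO_UDP,
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.src_addr = UINT32_MAX,
 *		.hdr.dst_addr = UINT32_MAX,
 *		.hdr.next_proto_id = UINT8_MAX,
 *	};
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr.src_port = rte_cpu_to_be_16(80),
 *		.hdr.dst_port = rte_cpu_to_be_16(80),
 *	};
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr.src_port = UINT16_MAX,
 *		.hdr.dst_port = UINT16_MAX,
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */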
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
#ifdef RTE_LIBRTE_SECURITY
	/**
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses and protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP, UDP, SCTP or END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* get the TCP/UDP/SCTP info */
	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports and tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	} else {
		goto action;
	}
	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding,
	 * so check that the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Invalid priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* An ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule,
 * and extract the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
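/*
 * Illustrative sketch (comment only, not driver code): matching a single
 * EtherType, e.g. 0x0807 as in the example above, and steering it to a
 * queue; the queue index is an example value.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.type = rte_cpu_to_be_16(0x0807),
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.type = UINT16_MAX,
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */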
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Priority is not supported.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Group is not supported.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					  actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Queue index is too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "MAC compare is not supported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "The drop option is not supported");
		return -rte_errno;
	}

	return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule,
 * and extract the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0x02
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
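/*
 * Illustrative sketch (comment only, not driver code): matching TCP SYN
 * packets and steering them to a queue; queue 3 is an example value.
 * Per the table above, the ETH and IP items carry NULL spec and mask,
 * and the TCP mask covers only the SYN bit.
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr.tcp_flags = 0x02 };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr.tcp_flags = 0x02 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 3 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */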
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
		      const struct rte_flow_item pattern[],
		      const struct rte_flow_action actions[],
		      struct rte_eth_syn_filter *filter,
		      struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or the highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Priority is not supported.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
		       const struct rte_flow_attr *attr,
		       const struct rte_flow_item pattern[],
		       const struct rte_flow_action actions[],
		       struct rte_eth_syn_filter *filter,
		       struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
				    actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule,
 * and extract the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x1	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
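/*
 * Illustrative sketch (comment only, not driver code): an E-tag rule as
 * documented above; grp/e_cid_base and the VF id are example values.
 * The 14-bit grp + e_cid_base value is carried in rsvd_grp_ecid_b in
 * big endian, so the full 14-bit mask is 0x3FFF.
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *	};
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vf vf = { .id = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */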
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E-CID base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * The e-tag id is taken as little endian by the HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}
	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Priority is not supported.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			 const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			 struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				      actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Egress is not supported.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Priority is not supported.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else { /* drop */
		/* signature mode does not support the drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void action is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void action is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
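/*
 * Illustrative sketch (comment only, not driver code): the action list
 * accepted above is QUEUE or DROP, optionally followed by MARK; the
 * queue index and mark id are example values.
 *
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */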
/* search the next non-void pattern item, skipping FUZZY items */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);
	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}
/* A FUZZY item with a usable non-zero threshold selects signature mode. */
static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec = (const struct rte_flow_item_fuzzy *)item->spec;
			last = (const struct rte_flow_item_fuzzy *)item->last;
			mask = (const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;

			if (!last)
				lh = sh;
			else
				lh = last->thresh;

			mh = mask->thresh;
			sh = sh & mh;
			lh = lh & mh;

			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
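/*
 * Illustrative sketch (comment only, not driver code): adding a FUZZY
 * item with a non-zero threshold anywhere before END selects signature
 * mode for a flow director rule:
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = UINT32_MAX };
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */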
/**
 * Parse the rule to see if it is an IP or MAC-VLAN flow director rule,
 * and extract the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
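/*
 * Illustrative sketch (comment only, not driver code): the MAC VLAN
 * example above expressed with rte_flow items; the address and tci are
 * example values.
 *
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst.addr_bytes = { 0xAC, 0x7B, 0xA1, 0x2C, 0x6D, 0x36 },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */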
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to the
	 * default value, so we need not do anything for the not provided
	 * fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;
	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address;
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {
			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * The src MAC address must be masked,
			 * and the dst MAC address mask is not supported.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/*** If both spec and mask are NULL,
		 * it means don't care about ETH.
		 * Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported here.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content,
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content,
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must be signature match mode
		 * 2. "last" is not supported
		 * 3. mask must not be NULL
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content,
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content,
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content,
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* only the x550 family supports the sctp port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports;
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other macs, even the sctp port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
				(sctp_mask->hdr.src_port ||
				 sctp_mask->hdr.dst_port ||
				 sctp_mask->hdr.tag ||
				 sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the flex byte info */
	if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* mask should not be NULL */
		if (!item->mask || !item->spec) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_mask = (const struct rte_flow_item_raw *)item->mask;

		/* check mask */
		if (raw_mask->relative != 0x1 ||
		    raw_mask->search != 0x1 ||
		    raw_mask->reserved != 0x0 ||
		    (uint32_t)raw_mask->offset != 0xffffffff ||
		    raw_mask->limit != 0xffff ||
		    raw_mask->length != 0xffff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		raw_spec = (const struct rte_flow_item_raw *)item->spec;

		/* check spec */
		if (raw_spec->relative != 0 ||
		    raw_spec->search != 0 ||
		    raw_spec->reserved != 0 ||
		    raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
		    raw_spec->offset % 2 ||
		    raw_spec->limit != 0 ||
		    raw_spec->length != 2 ||
		    /* pattern can't be 0xffff */
		    (raw_spec->pattern[0] == 0xff &&
		     raw_spec->pattern[1] == 0xff)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check pattern mask */
		if (raw_mask->pattern[0] != 0xff ||
		    raw_mask->pattern[1] != 0xff) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rule->mask.flex_bytes_mask = 0xffff;
		rule->ixgbe_fdir.formatted.flex_bytes =
			(((uint16_t)raw_spec->pattern[1]) << 8) |
			raw_spec->pattern[0];
		rule->flex_bytes_offset = raw_spec->offset;
	}
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
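/*
 * Illustrative sketch (comment only, not driver code): a RAW item that
 * satisfies the flexbyte checks above, matching two bytes (0x86DD, e.g.
 * the IPv6 EtherType) at an even offset; this uses the pointer form of
 * rte_flow_item_raw::pattern found in newer rte_flow versions.
 *
 *	static const uint8_t flex_pattern[2] = { 0x86, 0xDD };
 *	static const uint8_t flex_pattern_mask[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .offset = 12,
 *		.limit = 0, .length = 2,
 *		.pattern = flex_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1,
 *		.offset = (int32_t)UINT32_MAX,
 *		.limit = UINT16_MAX, .length = UINT16_MAX,
 *		.pattern = flex_pattern_mask,
 *	};
 */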
/* 0x6558: the Transparent Ethernet Bridging protocol type carried in NVGRE */
#define NVGRE_PROTOCOL 0x6558
/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
 * and extract the flow director filter info along the way.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be VXLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so nothing more needs to be done for the missing fields.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	/**
	 * The first not void item should be
	 * MAC, IPv4, IPv6, UDP, VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
		item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
		item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Ranges (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
		item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Ranges (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
			item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Ranges (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Ranges (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
			vxlan_mask->vni[2]) &&
			((vxlan_mask->vni[0] != 0xFF) ||
			(vxlan_mask->vni[1] != 0xFF) ||
			(vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
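			/*
			 * The 3-byte VNI lands in the upper three bytes of
			 * the big-endian word; after the byte-order
			 * conversion below it occupies the low 24 bits of
			 * tni_vni in CPU order.
			 */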
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Ranges (item->last) are not supported. */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
			rte_cpu_to_be_16(0x3000) ||
			nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
			nvgre_mask->tni[2]) &&
			((nvgre_mask->tni[0] != 0xFF) ||
			(nvgre_mask->tni[1] != 0xFF) ||
			(nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI is a 24-bit field. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
				rte_cpu_to_be_16(0x2000) ||
				nvgre_spec->protocol !=
				rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* TNI is a 24-bit field. */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/**
	 * Only support VLAN and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Ranges (item->last) are not supported. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Matching on Ether type is not supported: its mask must be 0. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Matching on src MAC address is not supported: its mask must be 0. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
				sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* When no VLAN item is given, use the full TCI mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
		(item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Ranges (item->last) are not supported. */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/**
	 * If the TCI is 0, it means the VLAN is a "don't care";
	 * nothing more needs to be done.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
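
/*
 * Illustrative sketch only; the driver never calls this helper. It builds the
 * VxLAN pattern accepted by ixgbe_parse_fdir_filter_tunnel() above:
 * ETH / IPV4 / UDP / VXLAN / inner ETH / VLAN / END. The helper name and the
 * VNI, MAC mask and TCI values are hypothetical.
 */
static __rte_unused void
example_build_vxlan_fdir_pattern(struct rte_flow_item pattern[7])
{
	/* Static so the pointers stored in the items stay valid. */
	static struct rte_flow_item_vxlan vxlan_spec;
	static struct rte_flow_item_vxlan vxlan_mask;
	static struct rte_flow_item_eth inner_eth_mask;
	static struct rte_flow_item_vlan vlan_spec;
	static struct rte_flow_item_vlan vlan_mask;
	static const uint8_t vni[3] = { 0x00, 0x32, 0x54 };
	int i;

	/* The VNI must be given with an all-ones mask (see check above). */
	memset(&vxlan_spec, 0, sizeof(vxlan_spec));
	memset(&vxlan_mask, 0, sizeof(vxlan_mask));
	for (i = 0; i < 3; i++) {
		vxlan_spec.vni[i] = vni[i];
		vxlan_mask.vni[i] = 0xFF;
	}

	/* Only the inner dst MAC can be matched; it is masked per byte. */
	memset(&inner_eth_mask, 0, sizeof(inner_eth_mask));
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		inner_eth_mask.dst.addr_bytes[i] = 0xFF;

	/* VLAN TCI; the parser truncates the mask to 0xEFFF anyway. */
	memset(&vlan_spec, 0, sizeof(vlan_spec));
	memset(&vlan_mask, 0, sizeof(vlan_mask));
	vlan_spec.tci = rte_cpu_to_be_16(0x2016);	/* hypothetical */
	vlan_mask.tci = rte_cpu_to_be_16(0xEFFF);

	memset(pattern, 0, 7 * sizeof(pattern[0]));
	/* Outer items only describe the protocol stack: no spec, no mask. */
	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
	pattern[2].type = RTE_FLOW_ITEM_TYPE_UDP;
	pattern[3].type = RTE_FLOW_ITEM_TYPE_VXLAN;
	pattern[3].spec = &vxlan_spec;
	pattern[3].mask = &vxlan_mask;
	pattern[4].type = RTE_FLOW_ITEM_TYPE_ETH;
	pattern[4].mask = &inner_eth_mask;
	pattern[5].type = RTE_FLOW_ITEM_TYPE_VLAN;
	pattern[5].spec = &vlan_spec;
	pattern[5].mask = &vlan_mask;
	pattern[6].type = RTE_FLOW_ITEM_TYPE_END;
}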
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
	if (hw->mac.type != ixgbe_mac_82599EB &&
		hw->mac.type != ixgbe_mac_X540 &&
		hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	/* Try the exact-match parser first, then the tunnel parser. */
	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);
	if (ret)
		return ret;

step_next:

	if (hw->mac.type == ixgbe_mac_82599EB &&
		rule->fdirflags == IXGBE_FDIRCMD_DROP &&
		(rule->ixgbe_fdir.formatted.src_port != 0 ||
		rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
		fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
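
/*
 * Illustrative application-side sketch, not driver code: the port has to be
 * configured with a flow director mode that matches the rule, since the
 * checks above reject rules when dev_conf.fdir_conf.mode is
 * RTE_FDIR_MODE_NONE or differs from rule->mode. The values are hypothetical.
 */
static __rte_unused void
example_enable_perfect_fdir(struct rte_eth_conf *port_conf)
{
	/* Perfect match, as required by the non-tunnel rules parsed above. */
	port_conf->fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
	port_conf->fdir_conf.pballoc = RTE_FDIR_PBALLOC_64K;
	port_conf->fdir_conf.drop_queue = 127;	/* hypothetical */
}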
void
ixgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&ixgbe_flow_list);
}
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr,
				entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr,
				entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr,
				entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr,
				entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr,
				entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr,
				entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
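
/*
 * Concretely, ixgbe_flow_create() below tries the parsers in a fixed order:
 * ntuple, ethertype, SYN, flow director and finally L2 tunnel. The first
 * parser that accepts the rule decides which filter type backs the flow.
 */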
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;
	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
			ixgbe_flow_mem_ptr, entries);
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);

#ifdef RTE_LIBRTE_SECURITY
	/* An ESP flow is not really a flow. */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}
	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}
	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR,
						"failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * Clean the mask_added flag if we fail
				 * to program the filter.
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}
	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
		goto out;
	}
out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			"Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
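
/*
 * Illustrative application-side sketch, not driver code: creating a flow
 * through the generic API, which reaches ixgbe_flow_create() above on an
 * ixgbe port. The helper name and the queue action are hypothetical.
 */
static __rte_unused struct rte_flow *
example_create_queue_flow(uint16_t port_id,
			  const struct rte_flow_item pattern[],
			  uint16_t rx_queue,
			  struct rte_flow_error *error)
{
	struct rte_flow_attr attr;
	struct rte_flow_action_queue queue;
	struct rte_flow_action actions[2];

	memset(&attr, 0, sizeof(attr));
	attr.ingress = 1;	/* match on the Rx path */

	queue.index = rx_queue;
	memset(actions, 0, sizeof(actions));
	actions[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
	actions[0].conf = &queue;
	actions[1].type = RTE_FLOW_ACTION_TYPE_END;

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}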
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format; it does not guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
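
/*
 * Illustrative sketch: since ixgbe_flow_validate() above only checks the
 * format, an application should still check the result of rte_flow_create(),
 * which can fail later for lack of hardware room. The helper name is
 * hypothetical.
 */
static __rte_unused int
example_validate_then_create(uint16_t port_id,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     struct rte_flow **flow)
{
	int ret = rte_flow_validate(port_id, attr, pattern, actions, error);

	if (ret)
		return ret;	/* the rule format is not supported */

	*flow = rte_flow_create(port_id, attr, pattern, actions, error);
	return *flow == NULL ? -rte_errno : 0;
}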
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
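
/*
 * Illustrative application-side sketch: destroying one rule goes through
 * rte_flow_destroy(), which lands in ixgbe_flow_destroy() above. The helper
 * name is hypothetical.
 */
static __rte_unused int
example_destroy_flow(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_error err;

	return rte_flow_destroy(port_id, flow, &err);
}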
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}
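
/*
 * Illustrative application-side sketch: rte_flow_flush() invokes
 * ixgbe_flow_flush() above and removes every rule on the port in one call.
 */
static __rte_unused int
example_flush_flows(uint16_t port_id)
{
	struct rte_flow_error err;

	return rte_flow_flush(port_id, &err);
}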
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};