/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_pci.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_dev.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};
/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};
/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};
/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};
/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};
/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
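
/*
 * Illustrative sketch (not part of the original file): these are plain
 * sys/queue.h tail queues, so they are initialized and walked with the
 * standard macros. The init placement below is an assumption for
 * illustration only.
 *
 *	TAILQ_INIT(&ixgbe_flow_list);
 *
 *	struct ixgbe_flow_mem *mem_ele;
 *	TAILQ_FOREACH(mem_ele, &ixgbe_flow_list, entries)
 *		printf("tracked flow %p\n", (void *)mem_ele->flow);
 */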

/**
 * An endless loop cannot happen under the assumptions below:
 * 1. there is at least one not-void item (END)
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static inline
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}

/**
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets normally use network order.
 */
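
/*
 * Illustrative sketch (assumption, not from the original file): pattern
 * spec/mask fields are therefore written in network order, while e.g. a
 * queue action index stays in CPU order.
 *
 *	struct rte_flow_item_tcp tcp_spec = { 0 };
 *	struct rte_flow_action_queue queue = { 0 };
 *
 *	tcp_spec.hdr.dst_port = rte_cpu_to_be_16(179);
 *	queue.index = 4;
 */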

/**
 * Parse the rule to see if it is an n-tuple rule,
 * and fill the n-tuple filter info along the way.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
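
/*
 * Illustrative sketch (assumption, not from the original file): the UDP
 * example above, expressed as application-side items and actions that
 * this parser accepts, steering matches to queue 1.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *	struct rte_flow_item_udp udp_spec = { 0 }, udp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
 *	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
 *	ip_spec.hdr.next_proto_id = IPPROTO_UDP;
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	ip_mask.hdr.dst_addr = UINT32_MAX;
 *	ip_mask.hdr.next_proto_id = 0xFF;
 *	udp_spec.hdr.src_port = rte_cpu_to_be_16(80);
 *	udp_spec.hdr.dst_port = rte_cpu_to_be_16(80);
 *	udp_mask.hdr.src_port = UINT16_MAX;
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */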
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			 const struct rte_flow_item pattern[],
			 const struct rte_flow_action actions[],
			 struct rte_eth_ntuple_filter *filter,
			 struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

	if (!pattern) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));

#ifdef RTE_LIBRTE_SECURITY
	/**
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
				return -rte_errno;
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
					item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif

	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = (const struct rte_flow_item_eth *)item->spec;
		eth_mask = (const struct rte_flow_item_eth *)item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 or VLAN */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
			item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}
	}

	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
	/**
	 * Only support src & dst addresses, protocol;
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;

	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
		(!item->spec && !item->mask)) {
		goto action;
	}

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
		(!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

		/**
		 * Only support src & dst ports, tcp flags;
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = (const struct rte_flow_item_udp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = (const struct rte_flow_item_udp *)item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

		/**
		 * Only support src & dst ports;
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
			return -rte_errno;
		}

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
		return -rte_errno;
	}

action:

	/**
	 * n-tuple only supports forwarding;
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}
	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");
		return -rte_errno;
	}
	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;

	return 0;
}

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_ntuple_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
	if (ret)
		return ret;

#ifdef RTE_LIBRTE_SECURITY
	/* an ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support tcp flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");
		return -rte_errno;
	}

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
	return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule,
 * and fill the ethertype filter info along the way.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
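
/*
 * Illustrative sketch (assumption, not from the original file): the
 * ethertype example above as application-side items, steering matches
 * of ethertype 0x0807 to queue 0.
 *
 *	struct rte_flow_item_eth eth_spec = { 0 };
 *	struct rte_flow_item_eth eth_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *
 *	eth_spec.type = rte_cpu_to_be_16(0x0807);
 *	eth_mask.type = 0xFFFF;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */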
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			    const struct rte_flow_item *pattern,
			    const struct rte_flow_action *actions,
			    struct rte_eth_ethertype_filter *filter,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return -rte_errno;
	}

	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter");
		return -rte_errno;
	}

	eth_spec = (const struct rte_flow_item_eth *)item->spec;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ether address mask");
		return -rte_errno;
	}

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid ethertype mask");
		return -rte_errno;
	}

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}
	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ethertype filter.");
		return -rte_errno;
	}

	/* Parse action */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* Parse attr */
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_eth_ethertype_filter *filter,
			     struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);
	if (ret)
		return ret;

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");
		return -rte_errno;
	}

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");
		return -rte_errno;
	}

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
		return -rte_errno;
	}

	return 0;
}

/**
 * Parse the rule to see if it is a TCP SYN rule,
 * and fill the TCP SYN filter info along the way.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
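
/*
 * Illustrative sketch (assumption, not from the original file): a SYN
 * rule as described above; only the SYN bit is matched, and hits go to
 * queue 2.
 *
 *	struct rte_flow_item_tcp tcp_spec = { 0 };
 *	struct rte_flow_item_tcp tcp_mask = { 0 };
 *	struct rte_flow_action_queue queue = { .index = 2 };
 *
 *	tcp_spec.hdr.tcp_flags = TCP_SYN_FLAG;
 *	tcp_mask.hdr.tcp_flags = TCP_SYN_FLAG;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */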
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_eth_syn_filter *filter,
				struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Skip Ethernet */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");
			return -rte_errno;
		}

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Skip IP */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
			return -rte_errno;
		}

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
	tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			  const struct rte_flow_attr *attr,
			  const struct rte_flow_item pattern[],
			  const struct rte_flow_action actions[],
			  struct rte_eth_syn_filter *filter,
			  struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
					actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	if (ret)
		return ret;

	return 0;
}

/**
 * Parse the rule to see if it is an L2 tunnel rule,
 * and fill the L2 tunnel filter info along the way.
 * Only E-tag is supported for now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
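
/*
 * Illustrative sketch (assumption, not from the original file): an E-tag
 * rule for GRP 0x2 / e_cid_base 0x309 forwarded to VF 1. The bit layout
 * used below (GRP and E-CID base packed into the low 14 bits of
 * rsvd_grp_ecid_b) is an assumption for illustration.
 *
 *	struct rte_flow_item_e_tag e_tag_spec = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309) };
 *	struct rte_flow_item_e_tag e_tag_mask = {
 *		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF) };
 *	struct rte_flow_action_vf vf = { .id = 1 };
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &e_tag_spec, .mask = &e_tag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */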
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
	e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/**
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret = 0;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
		hw->mac.type != ixgbe_mac_X550EM_x &&
		hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");
		return -rte_errno;
	}

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;

	return ret;
}

/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			  const struct rte_flow_action actions[],
			  struct ixgbe_fdir_rule *rule,
			  struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
		return -rte_errno;
	}

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");
			return -rte_errno;
		}
		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
		(act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rule->soft_id = 0;

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

/* search the next not-void pattern, skipping fuzzy items */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;
	int i = 0;

	while (1) {
		item = pattern + i;
		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
			spec =
			(const struct rte_flow_item_fuzzy *)item->spec;
			last =
			(const struct rte_flow_item_fuzzy *)item->last;
			mask =
			(const struct rte_flow_item_fuzzy *)item->mask;

			if (!spec || !mask)
				return 0;

			sh = spec->thresh;
			lh = last ? last->thresh : sh;
			mh = mask->thresh;
			sh &= mh;
			lh &= mh;

			/* a non-zero threshold in range requests
			 * signature mode.
			 */
			if (!sh || sh > lh)
				return 0;

			return 1;
		}

		i++;
	}

	return 0;
}
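
/*
 * Illustrative sketch (assumption, not from the original file): adding a
 * FUZZY item anywhere before END requests signature mode; thresh is the
 * knob inspected above.
 *
 *	struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
 *	struct rte_flow_item_fuzzy fuzzy_mask = { .thresh = 0xffffffff };
 *
 *	struct rte_flow_item fuzzy_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_FUZZY,
 *		.spec = &fuzzy_spec,
 *		.mask = &fuzzy_mask,
 *	};
 */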

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
 * and fill the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX		relative	0	0x1
 *		search		0	0x1
 *		reserved	0	0
 *		offset		12	0xFFFFFFFF
 *		limit		0	0xFFFF
 *		length		2	0xFFFF
 *		pattern[0]	0x86	0xFF
 *		pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
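
/*
 * Illustrative sketch (assumption, not from the original file): a
 * perfect-mode IPv4 flow director pattern matching only the source and
 * destination addresses from the example above, ports left unspecified.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { 0 }, ip_mask = { 0 };
 *
 *	ip_spec.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
 *	ip_spec.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332);
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	ip_mask.hdr.dst_addr = UINT32_MAX;
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */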
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			       const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/**
		 * Only support vlan and dst MAC address;
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			eth_spec = (const struct rte_flow_item_eth *)item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}
		}

		if (item->mask) {

			rule->b_mask = TRUE;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/**
			 * The src MAC address must be masked,
			 * and the dst MAC address mask is not supported.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
					eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");
					return -rte_errno;
				}
			}

			/* When no VLAN, considered as full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		}
		/* If both spec and mask are NULL, it means don't care
		 * about ETH. Do nothing.
		 */

		/**
		 * Check if the next not void item is vlan or ipv4.
		 * IPv6 is not supported.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
			if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		} else {
			if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst addresses;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		ipv4_mask =
			(const struct rte_flow_item_ipv4 *)item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/**
		 * 1. must be a signature match
		 * 2. last is not supported
		 * 3. mask must not be null
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		rule->b_mask = TRUE;
		ipv6_mask =
			(const struct rte_flow_item_ipv6 *)item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		if (item->spec) {
			rule->b_spec = TRUE;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);
		}

		/**
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		/**
		 * Only care about src & dst ports;
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->b_mask = TRUE;
		udp_mask = (const struct rte_flow_item_udp *)item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

		if (item->spec) {
			rule->b_spec = TRUE;
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/**
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* only the x550 family supports the sctp port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/**
			 * Only care about src & dst ports;
			 * others should be masked.
			 */
			if (!item->mask) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->b_mask = TRUE;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

			if (item->spec) {
				rule->b_spec = TRUE;
				sctp_spec =
					(const struct rte_flow_item_sctp *)item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
			}
		/* on other macs, even the sctp port is not supported */
		} else {
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (sctp_mask &&
				(sctp_mask->hdr.src_port ||
				 sctp_mask->hdr.dst_port ||
				 sctp_mask->hdr.tag ||
				 sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
		}

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
2114 /* Get the flex byte info */
2115 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2116 /* Not supported last point for range*/
2118 rte_flow_error_set(error, EINVAL,
2119 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2120 item, "Not supported last point for range");
2123 /* mask should not be null */
2124 if (!item->mask || !item->spec) {
2125 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2126 rte_flow_error_set(error, EINVAL,
2127 RTE_FLOW_ERROR_TYPE_ITEM,
2128 item, "Not supported by fdir filter");
2132 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2135 if (raw_mask->relative != 0x1 ||
2136 raw_mask->search != 0x1 ||
2137 raw_mask->reserved != 0x0 ||
2138 (uint32_t)raw_mask->offset != 0xffffffff ||
2139 raw_mask->limit != 0xffff ||
2140 raw_mask->length != 0xffff) {
2141 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2142 rte_flow_error_set(error, EINVAL,
2143 RTE_FLOW_ERROR_TYPE_ITEM,
2144 item, "Not supported by fdir filter");
2148 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2151 if (raw_spec->relative != 0 ||
2152 raw_spec->search != 0 ||
2153 raw_spec->reserved != 0 ||
2154 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2155 raw_spec->offset % 2 ||
2156 raw_spec->limit != 0 ||
2157 raw_spec->length != 2 ||
2158 /* pattern can't be 0xffff */
2159 (raw_spec->pattern[0] == 0xff &&
2160 raw_spec->pattern[1] == 0xff)) {
2161 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2162 rte_flow_error_set(error, EINVAL,
2163 RTE_FLOW_ERROR_TYPE_ITEM,
2164 item, "Not supported by fdir filter");
2168 /* check pattern mask */
2169 if (raw_mask->pattern[0] != 0xff ||
2170 raw_mask->pattern[1] != 0xff) {
2171 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2172 rte_flow_error_set(error, EINVAL,
2173 RTE_FLOW_ERROR_TYPE_ITEM,
2174 item, "Not supported by fdir filter");
2178 rule->mask.flex_bytes_mask = 0xffff;
2179 rule->ixgbe_fdir.formatted.flex_bytes =
2180 (((uint16_t)raw_spec->pattern[1]) << 8) |
2181 raw_spec->pattern[0];
2182 rule->flex_bytes_offset = raw_spec->offset;
2185 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2186 /* check if the next not void item is END */
2187 item = next_no_fuzzy_pattern(pattern, item);
2188 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2189 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2190 rte_flow_error_set(error, EINVAL,
2191 RTE_FLOW_ERROR_TYPE_ITEM,
2192 item, "Not supported by fdir filter");
2197 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
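
/*
 * Illustrative sketch (not part of the driver): an application-side RAW
 * item that would satisfy the flex-byte checks above. The offset value 12
 * is an arbitrary assumption; it only has to be even and no larger than
 * IXGBE_MAX_FLX_SOURCE_OFF. A mask offset of -1 converts to 0xffffffff.
 *
 *	static const uint8_t flex_spec[2] = { 0x86, 0xdd };
 *	static const uint8_t flex_mask[2] = { 0xff, 0xff };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .reserved = 0,
 *		.offset = 12, .limit = 0, .length = 2,
 *		.pattern = flex_spec,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1, .reserved = 0,
 *		.offset = -1, .limit = 0xffff, .length = 0xffff,
 *		.pattern = flex_mask,
 *	};
 */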
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/ IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value. So, we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->protocol &&
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver &&
		    nvgre_mask->c_k_s_rsvd0_ver !=
			rte_cpu_to_be_16(0xFFFF)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bit field */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) &&
			    nvgre_mask->c_k_s_rsvd0_ver) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			if (nvgre_mask->protocol &&
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no vlan, considered as full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);

		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the TCI is 0, it means don't care about the VLAN.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
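
/*
 * Illustrative sketch (not part of the driver): one way an application
 * could build a VxLAN pattern accepted by the tunnel parser above. The
 * VNI, inner MAC and VLAN TCI values are arbitrary assumptions.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_eth eth_spec = {
 *		.dst = { .addr_bytes = { 0x02, 0, 0, 0, 0, 0x01 } },
 *	};
 *	struct rte_flow_item_eth eth_mask = {
 *		.dst = { .addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016),
 *	};
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */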
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);
	if (ret)
		return ret;

step_next:

	if (hw->mac.type == ixgbe_mac_82599EB &&
	    rule->fdirflags == IXGBE_FDIRCMD_DROP &&
	    (rule->ixgbe_fdir.formatted.src_port != 0 ||
	     rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
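
/*
 * Note: the normal (non-tunnel) parser is tried first, so a pattern that
 * both parsers could accept is treated as a normal flow director rule.
 * On 82599 a DROP rule that also matches L4 ports is rejected above; for
 * example (illustrative only), "IPv4 + TCP dst_port 80 -> DROP" would
 * return -ENOTSUP on 82599 but may be accepted on X550 family devices.
 */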
void
ixgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&ixgbe_flow_list);
}
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				 ntuple_filter_ptr,
				 entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				 ethertype_filter_ptr,
				 entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				 syn_filter_ptr,
				 entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				 l2_tn_filter_ptr,
				 entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				 fdir_rule_ptr,
				 entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				 ixgbe_flow_mem_ptr,
				 entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
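
/*
 * Note: each list above is drained with TAILQ_FIRST/TAILQ_REMOVE rather
 * than TAILQ_FOREACH, because the element being visited is freed inside
 * the loop; FOREACH would dereference freed memory when advancing.
 */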

/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);

#ifdef RTE_LIBRTE_SECURITY
	/* ESP flow not really a flow */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR, "failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * clean the mask_added flag if we fail to
				 * program the filter
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
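
/*
 * Illustrative usage sketch (application side, not part of the driver):
 * ixgbe_flow_create() is reached through the generic rte_flow API. The
 * queue index and the pattern variable are assumptions for the example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr,
 *					     pattern, actions, &err);
 *	if (!f)
 *		printf("flow create failed: %s\n", err.message);
 */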

/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It doesn't guarantee that the rule can be
 * programmed into the HW, since there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
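
/*
 * Illustrative usage sketch (application side): validating a rule before
 * creating it, through the generic rte_flow API.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */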
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
				pmd_flow->rule;
		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
				pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
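
/*
 * Note: destroy removes the rule from hardware first and only then from
 * the matching software list; the flow handle itself is tracked in
 * ixgbe_flow_list and freed last. When the last fdir rule is removed, the
 * global fdir mask is allowed to change again (mask_added is cleared).
 */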
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}
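
/*
 * Illustrative usage sketch (application side): removing every rule on a
 * port in one call through the generic rte_flow API.
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed: %s\n", err.message);
 */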
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
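
/*
 * Note (assumption for this DPDK era): this ops table is what the ethdev
 * layer hands back to applications, returned by the PMD's filter_ctrl
 * callback for RTE_ETH_FILTER_GENERIC, so all rte_flow_* calls on an
 * ixgbe port land on the functions above.
 */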