/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"
#define IXGBE_MIN_N_TUPLE_PRIO		1
#define IXGBE_MAX_N_TUPLE_PRIO		7
#define IXGBE_MAX_FLX_SOURCE_OFF	62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
    TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
    struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
    TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
    struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
    TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
    struct rte_eth_syn_filter filter_info;
};

/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
    TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
    struct ixgbe_fdir_rule filter_info;
};

/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
    TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
    struct rte_eth_l2_tunnel_conf filter_info;
};

/* rss filter list structure */
struct ixgbe_rss_conf_ele {
    TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
    struct ixgbe_rte_flow_rss_conf filter_info;
};

/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
    TAILQ_ENTRY(ixgbe_flow_mem) entries;
    struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_rss_filter_list filter_rss_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
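
/*
 * Illustrative sketch (not part of the upstream file): once a rule has
 * been parsed, the create path wraps it in one of the *_ele nodes above
 * and links it into the matching list, roughly like:
 *
 *   struct ixgbe_ntuple_filter_ele *node;
 *
 *   node = rte_zmalloc("ixgbe_flow", sizeof(*node), 0);
 *   if (node) {
 *       rte_memcpy(&node->filter_info, &ntuple_filter,
 *                  sizeof(struct rte_eth_ntuple_filter));
 *       TAILQ_INSERT_TAIL(&filter_ntuple_list, node, entries);
 *   }
 */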
/**
 * An endless loop will never happen with the assumptions below:
 * 1. there is at least one no-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
        const struct rte_flow_item pattern[],
        const struct rte_flow_item *cur)
{
    const struct rte_flow_item *next =
        cur ? cur + 1 : &pattern[0];
    while (1) {
        if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
            return next;
        next++;
    }
}

static inline
const struct rte_flow_action *next_no_void_action(
        const struct rte_flow_action actions[],
        const struct rte_flow_action *cur)
{
    const struct rte_flow_action *next =
        cur ? cur + 1 : &actions[0];
    while (1) {
        if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
            return next;
        next++;
    }
}
/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * normally the packets should use network order.
 */
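
/*
 * For instance (an illustrative sketch, not part of the driver): a spec
 * matching TCP destination port 80 carries the port in network order,
 * while the queue index in the action stays in CPU order:
 *
 *   struct rte_flow_item_tcp tcp_spec = {
 *       .hdr.dst_port = rte_cpu_to_be_16(80),  // pattern: big endian
 *   };
 *   struct rte_flow_action_queue queue_act = {
 *       .index = 3,                            // action: CPU order
 *   };
 */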
/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
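
/*
 * An illustrative pattern/action pair this parser accepts (a sketch, not
 * part of the driver; the addresses are the ones from the table above):
 *
 *   struct rte_flow_item_ipv4 ip_spec = {
 *       .hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), // 192.168.1.20
 *       .hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), // 192.167.3.50
 *       .hdr.next_proto_id = 17,                      // UDP
 *   };
 *   struct rte_flow_item_ipv4 ip_mask = {
 *       .hdr.src_addr = RTE_BE32(0xFFFFFFFF),
 *       .hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
 *       .hdr.next_proto_id = 0xFF,
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *         .spec = &ip_spec, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue_act = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */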
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ntuple_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec;
    const struct rte_flow_item_sctp *sctp_mask;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_vlan *vlan_spec;
    const struct rte_flow_item_vlan *vlan_mask;
    struct rte_flow_item_eth eth_null;
    struct rte_flow_item_vlan vlan_null;

    if (!pattern) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
    memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
#ifdef RTE_LIBRTE_SECURITY
    /**
     * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
     */
    act = next_no_void_action(actions, NULL);
    if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
        const void *conf = act->conf;
        /* check if the next not void item is END */
        act = next_no_void_action(actions, act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
            memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION,
                act, "Not supported action.");
            return -rte_errno;
        }

        /* get the IP pattern */
        item = next_no_void_pattern(pattern, NULL);
        while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
               item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            if (item->last ||
                item->type == RTE_FLOW_ITEM_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "IP pattern missing.");
                return -rte_errno;
            }
            item = next_no_void_pattern(pattern, item);
        }

        filter->proto = IPPROTO_ESP;
        return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
                item->type == RTE_FLOW_ITEM_TYPE_IPV6);
    }
#endif
    /* the first not void item can be MAC or IPv4 */
    item = next_no_void_pattern(pattern, NULL);

    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        eth_spec = (const struct rte_flow_item_eth *)item->spec;
        eth_mask = (const struct rte_flow_item_eth *)item->mask;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* if the first item is MAC, the content should be NULL */
        if ((item->spec || item->mask) &&
            (memcmp(eth_spec, &eth_null,
                sizeof(struct rte_flow_item_eth)) ||
             memcmp(eth_mask, &eth_null,
                sizeof(struct rte_flow_item_eth)))) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
        /* check if the next not void item is IPv4 or Vlan */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
            rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
    }
    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
        vlan_mask = (const struct rte_flow_item_vlan *)item->mask;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* the content should be NULL */
        if ((item->spec || item->mask) &&
            (memcmp(vlan_spec, &vlan_null,
                sizeof(struct rte_flow_item_vlan)) ||
             memcmp(vlan_mask, &vlan_null,
                sizeof(struct rte_flow_item_vlan)))) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
        /* check if the next not void item is IPv4 */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
            rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
    }
    /* get the IPv4 info */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }
    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
    /**
     * Only support src & dst addresses, protocol,
     * others should be masked.
     */
    if (ipv4_mask->hdr.version_ihl ||
        ipv4_mask->hdr.type_of_service ||
        ipv4_mask->hdr.total_length ||
        ipv4_mask->hdr.packet_id ||
        ipv4_mask->hdr.fragment_offset ||
        ipv4_mask->hdr.time_to_live ||
        ipv4_mask->hdr.hdr_checksum) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
    filter->src_ip_mask = ipv4_mask->hdr.src_addr;
    filter->proto_mask = ipv4_mask->hdr.next_proto_id;

    ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
    filter->dst_ip = ipv4_spec->hdr.dst_addr;
    filter->src_ip = ipv4_spec->hdr.src_addr;
    filter->proto = ipv4_spec->hdr.next_proto_id;
    /* check if the next not void item is TCP or UDP */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
        item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
        (!item->spec && !item->mask)) {
        goto action;
    }
    /* get the TCP/UDP/SCTP info */
    if (item->type != RTE_FLOW_ITEM_TYPE_END &&
        (!item->spec || !item->mask)) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }

    /* not supported last point for range */
    if (item->last) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }
    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

        /**
         * Only support src & dst ports, tcp flags,
         * others should be masked.
         */
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = tcp_mask->hdr.dst_port;
        filter->src_port_mask = tcp_mask->hdr.src_port;
        if (tcp_mask->hdr.tcp_flags == 0xFF) {
            filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else if (!tcp_mask->hdr.tcp_flags) {
            filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else {
            memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        filter->dst_port = tcp_spec->hdr.dst_port;
        filter->src_port = tcp_spec->hdr.src_port;
        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
    } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        udp_mask = (const struct rte_flow_item_udp *)item->mask;

        /**
         * Only support src & dst ports,
         * others should be masked.
         */
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = udp_mask->hdr.dst_port;
        filter->src_port_mask = udp_mask->hdr.src_port;

        udp_spec = (const struct rte_flow_item_udp *)item->spec;
        filter->dst_port = udp_spec->hdr.dst_port;
        filter->src_port = udp_spec->hdr.src_port;
    } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

        /**
         * Only support src & dst ports,
         * others should be masked.
         */
        if (sctp_mask->hdr.tag ||
            sctp_mask->hdr.cksum) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = sctp_mask->hdr.dst_port;
        filter->src_port_mask = sctp_mask->hdr.src_port;

        sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
        filter->dst_port = sctp_spec->hdr.dst_port;
        filter->src_port = sctp_spec->hdr.src_port;
    } else {
        goto action;
    }
    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

action:

    /**
     * n-tuple only supports forwarding,
     * check if the first not void action is QUEUE.
     */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }
    filter->queue =
        ((const struct rte_flow_action_queue *)act->conf)->index;

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    if (attr->priority > 0xFFFF) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Error priority.");
        return -rte_errno;
    }
    filter->priority = (uint16_t)attr->priority;
    if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
        filter->priority = 1;

    return 0;
}
/* a specific function for ixgbe because its flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ntuple_filter *filter,
            struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

    ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
    if (ret)
        return ret;

#ifdef RTE_LIBRTE_SECURITY
    /* an ESP flow is not really a flow */
    if (filter->proto == IPPROTO_ESP)
        return 0;
#endif

    /* Ixgbe doesn't support tcp flags. */
    if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* Ixgbe doesn't support many priorities. */
    if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Priority not supported by ntuple filter");
        return -rte_errno;
    }

    if (filter->queue >= dev->data->nb_rx_queues)
        return -rte_errno;

    /* fixed value for ixgbe */
    filter->flags = RTE_5TUPLE_FLAGS;
    return 0;
}
/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
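
/*
 * Illustrative sketch of an ethertype rule this parser accepts (not part
 * of the driver), matching the 0x0807 example above; src/dst MAC stay
 * all-zero so no MAC compare is requested:
 *
 *   struct rte_flow_item_eth eth_spec = {
 *       .type = rte_cpu_to_be_16(0x0807),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *       .type = RTE_BE16(0xFFFF),
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH,
 *         .spec = &eth_spec, .mask = &eth_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */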
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item *pattern,
            const struct rte_flow_action *actions,
            struct rte_eth_ethertype_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_action_queue *act_q;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }
    item = next_no_void_pattern(pattern, NULL);
    /* The first non-void item should be MAC. */
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Get the MAC info. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    eth_spec = (const struct rte_flow_item_eth *)item->spec;
    eth_mask = (const struct rte_flow_item_eth *)item->mask;

    /* Mask bits of source MAC address must be full of 0.
     * Mask bits of destination MAC address must be full
     * of 1 or full of 0.
     */
    if (!is_zero_ether_addr(&eth_mask->src) ||
        (!is_zero_ether_addr(&eth_mask->dst) &&
         !is_broadcast_ether_addr(&eth_mask->dst))) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ether address mask");
        return -rte_errno;
    }

    if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ethertype mask");
        return -rte_errno;
    }

    /* If mask bits of destination MAC address
     * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
     */
    if (is_broadcast_ether_addr(&eth_mask->dst)) {
        filter->mac_addr = eth_spec->dst;
        filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
    } else {
        filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
    }
    filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

    /* Check if the next non-void item is END. */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter.");
        return -rte_errno;
    }
    /* Check if the first non-void action is QUEUE or DROP. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;
    } else {
        filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
    }

    /* Check if the next non-void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* Parse attr */
    /* Must be input direction */
    if (!attr->ingress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->egress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->priority) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->group) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
            attr, "Not support group.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ethertype_filter *filter,
            struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP(hw->mac.type);

    ret = cons_parse_ethertype_filter(attr, pattern,
            actions, filter, error);
    if (ret)
        return ret;

    /* Ixgbe doesn't support MAC address. */
    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->queue >= dev->data->nb_rx_queues) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "queue index much too big");
        return -rte_errno;
    }

    if (filter->ether_type == ETHER_TYPE_IPv4 ||
        filter->ether_type == ETHER_TYPE_IPv6) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "IPv4/IPv6 not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "mac compare is unsupported");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "drop option is unsupported");
        return -rte_errno;
    }

    return 0;
}
/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0x02
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
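
/*
 * Illustrative sketch (not part of the driver): a SYN rule only gives
 * content for the TCP item; the parser below requires the tcp_flags
 * mask to be exactly the SYN bit:
 *
 *   struct rte_flow_item_tcp tcp_spec = {
 *       .hdr.tcp_flags = 0x02,          // SYN
 *   };
 *   struct rte_flow_item_tcp tcp_mask = {
 *       .hdr.tcp_flags = 0x02,          // mask must equal TCP_SYN_FLAG
 *   };
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *       { .type = RTE_FLOW_ITEM_TYPE_TCP,
 *         .spec = &tcp_spec, .mask = &tcp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */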
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_syn_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_action_queue *act_q;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }
    /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
    item = next_no_void_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }
    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Skip Ethernet */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* if the item is MAC, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN address mask");
            return -rte_errno;
        }

        /* check if the next not void item is IPv4 or IPv6 */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    /* Skip IP */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
        item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /* if the item is IP, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN mask");
            return -rte_errno;
        }

        /* check if the next not void item is TCP */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    /* Get the TCP info. Only support SYN. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid SYN mask");
        return -rte_errno;
    }
    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }
    tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
    tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
    if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
        tcp_mask->hdr.src_port ||
        tcp_mask->hdr.dst_port ||
        tcp_mask->hdr.sent_seq ||
        tcp_mask->hdr.recv_ack ||
        tcp_mask->hdr.data_off ||
        tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
        tcp_mask->hdr.rx_win ||
        tcp_mask->hdr.cksum ||
        tcp_mask->hdr.tcp_urp) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }
    /* check if the first not void action is QUEUE. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    act_q = (const struct rte_flow_action_queue *)act->conf;
    filter->queue = act_q->index;
    if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* Parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* Support 2 priorities, the lowest or highest. */
    if (!attr->priority) {
        filter->hig_pri = 0;
    } else if (attr->priority == (uint32_t)~0U) {
        filter->hig_pri = 1;
    } else {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_syn_filter *filter,
            struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP(hw->mac.type);

    ret = cons_parse_syn_filter(attr, pattern,
            actions, filter, error);

    if (filter->queue >= dev->data->nb_rx_queues)
        return -rte_errno;

    if (ret)
        return ret;

    return 0;
}
/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only support E-tag now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x1     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should set to 0x00.
 * item->last should be NULL.
 */
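
/*
 * Illustrative sketch (not part of the driver): an E-tag rule matching
 * GRP 0x1 / e_cid_base 0x309 and steering to VF 1. GRP and E-CID base
 * are packed into the big-endian rsvd_grp_ecid_b field, so both spec
 * and mask are expressed through it:
 *
 *   struct rte_flow_item_e_tag e_tag_spec = {
 *       .rsvd_grp_ecid_b = rte_cpu_to_be_16((0x1 << 12) | 0x309),
 *   };
 *   struct rte_flow_item_e_tag e_tag_mask = {
 *       .rsvd_grp_ecid_b = RTE_BE16(0x3FFF),
 *   };
 *   struct rte_flow_action_vf vf_act = { .id = 1 };
 */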
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_l2_tunnel_conf *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_e_tag *e_tag_spec;
    const struct rte_flow_item_e_tag *e_tag_mask;
    const struct rte_flow_action *act;
    const struct rte_flow_action_vf *act_vf;
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }
    /* The first not void item should be e-tag. */
    item = next_no_void_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    if (!item->spec || !item->mask) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
    e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

    /* Only care about GRP and E cid base. */
    if (e_tag_mask->epcp_edei_in_ecid_b ||
        e_tag_mask->in_ecid_e ||
        e_tag_mask->ecid_e ||
        e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
    /**
     * grp and e_cid_base are bit fields and only use 14 bits.
     * e-tag id is taken as little endian by HW.
     */
    filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }
    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* check if the first not void action is VF or PF. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
        act->type != RTE_FLOW_ACTION_TYPE_PF) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
        act_vf = (const struct rte_flow_action_vf *)act->conf;
        filter->pool = act_vf->id;
    } else {
        filter->pool = pci_dev->max_vfs;
    }

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_l2_tunnel_conf *l2_tn_filter,
            struct rte_flow_error *error)
{
    int ret = 0;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
    struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
    uint16_t vf_num;

    ret = cons_parse_l2_tn_filter(dev, attr, pattern,
            actions, l2_tn_filter, error);

    if (hw->mac.type != ixgbe_mac_X550 &&
        hw->mac.type != ixgbe_mac_X550EM_x &&
        hw->mac.type != ixgbe_mac_X550EM_a) {
        memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    vf_num = pci_dev->max_vfs;

    if (l2_tn_filter->pool > vf_num)
        return -rte_errno;

    return ret;
}
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    const struct rte_flow_action *act;
    const struct rte_flow_action_queue *act_q;
    const struct rte_flow_action_mark *mark;

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* check if the first not void action is QUEUE or DROP. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        rule->queue = act_q->index;
    } else { /* drop */
        /* signature mode does not support drop action. */
        if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION,
                act, "Not supported action.");
            return -rte_errno;
        }
        rule->fdirflags = IXGBE_FDIRCMD_DROP;
    }

    /* check if the next not void item is MARK */
    act = next_no_void_action(actions, act);
    if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
        (act->type != RTE_FLOW_ACTION_TYPE_END)) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    rule->soft_id = 0;

    if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
        mark = (const struct rte_flow_action_mark *)act->conf;
        rule->soft_id = mark->id;
        act = next_no_void_action(actions, act);
    }

    /* check if the next not void item is END */
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}
/* search next no void pattern and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
        const struct rte_flow_item pattern[],
        const struct rte_flow_item *cur)
{
    const struct rte_flow_item *next =
        next_no_void_pattern(pattern, cur);

    if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
        return next;
    next = next_no_void_pattern(pattern, next);
    return next;
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
    const struct rte_flow_item_fuzzy *spec, *last, *mask;
    const struct rte_flow_item *item;
    uint32_t sh, lh, mh;
    int i = 0;

    while (1) {
        item = pattern + i;
        if (item->type == RTE_FLOW_ITEM_TYPE_END)
            break;

        if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
            spec = (const struct rte_flow_item_fuzzy *)item->spec;
            last = (const struct rte_flow_item_fuzzy *)item->last;
            mask = (const struct rte_flow_item_fuzzy *)item->mask;

            if (!spec || !mask)
                return 0;

            sh = spec->thresh;
            lh = last ? last->thresh : sh;
            mh = mask->thresh;
            sh &= mh;
            lh &= mh;

            /* a non-zero masked threshold selects signature mode */
            if (!sh || sh > lh)
                return 0;
            return 1;
        }
        i++;
    }

    return 0;
}
/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 * UDP/TCP/SCTP src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * FLEX         relative        0       0x1
 *              search          0       0x1
 *              reserved        0       0
 *              offset          12      0xFFFFFFFF
 *              limit           0       0xFFFF
 *              length          2       0xFFFF
 *              pattern[0]      0x86    0xFF
 *              pattern[1]      0xDD    0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          dst_addr
 *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 *              0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * Other members in mask and spec should set to 0x00.
 * Item->last should be NULL.
 */
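
/*
 * Illustrative sketch of a perfect-mode rule this parser accepts (not
 * part of the driver): ip_spec/ip_mask and udp_spec/udp_mask are
 * hypothetical items filled in as in the table above, and matched
 * packets are both queued and marked:
 *
 *   struct rte_flow_item pattern[] = {
 *       { .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *         .spec = &ip_spec, .mask = &ip_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_UDP,
 *         .spec = &udp_spec, .mask = &udp_mask },
 *       { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue_act = { .index = 1 };
 *   struct rte_flow_action_mark mark_act = { .id = 0x1234 };
 *   struct rte_flow_action actions[] = {
 *       { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *       { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark_act },
 *       { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 */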
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
            const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct ixgbe_fdir_rule *rule,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_ipv6 *ipv6_spec;
    const struct rte_flow_item_ipv6 *ipv6_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec;
    const struct rte_flow_item_sctp *sctp_mask;
    const struct rte_flow_item_vlan *vlan_spec;
    const struct rte_flow_item_vlan *vlan_mask;
    const struct rte_flow_item_raw *raw_mask;
    const struct rte_flow_item_raw *raw_spec;
    uint8_t j;

    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /**
     * Some fields may not be provided. Set spec to 0 and mask to default
     * value, so we need not do anything for the fields not provided later.
     */
    memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
    memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
    rule->mask.vlan_tci_mask = 0;
    rule->mask.flex_bytes_mask = 0;
    /**
     * The first not void item should be
     * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
     */
    item = next_no_fuzzy_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    if (signature_match(pattern))
        rule->mode = RTE_FDIR_MODE_SIGNATURE;
    else
        rule->mode = RTE_FDIR_MODE_PERFECT;

    /* not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }
    /* Get the MAC info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /**
         * Only support vlan and dst MAC address,
         * others should be masked.
         */
        if (item->spec && !item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        if (item->spec) {
            rule->b_spec = TRUE;
            eth_spec = (const struct rte_flow_item_eth *)item->spec;

            /* Get the dst MAC. */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                rule->ixgbe_fdir.formatted.inner_mac[j] =
                    eth_spec->dst.addr_bytes[j];
            }
        }

        if (item->mask) {
            rule->b_mask = TRUE;
            eth_mask = (const struct rte_flow_item_eth *)item->mask;

            /* Ether type should be masked. */
            if (eth_mask->type ||
                rule->mode == RTE_FDIR_MODE_SIGNATURE) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }

            /* If ethernet has meaning, it means MAC VLAN mode. */
            rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

            /**
             * src MAC address must be masked,
             * and dst MAC address mask is not supported.
             */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                if (eth_mask->src.addr_bytes[j] ||
                    eth_mask->dst.addr_bytes[j] != 0xFF) {
                    memset(rule, 0,
                        sizeof(struct ixgbe_fdir_rule));
                    rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter");
                    return -rte_errno;
                }
            }

            /* When no VLAN, considered as full mask. */
            rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
        }
        /* If both spec and mask are NULL, it means we don't
         * care about the ETH header at all.
         */
    }
    /**
     * Check if the next not void item is vlan or ipv4.
     * IPv6 is not supported.
     */
    item = next_no_fuzzy_pattern(pattern, item);
    if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
        if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    } else {
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        if (!(item->spec && item->mask)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
        vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

        rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

        rule->mask.vlan_tci_mask = vlan_mask->tci;
        rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
        /* More than one tag is not supported. */

        /* Next not void item must be END */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the IPV4 info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_IPV4;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst addresses,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        ipv4_mask =
            (const struct rte_flow_item_ipv4 *)item->mask;
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.next_proto_id ||
            ipv4_mask->hdr.hdr_checksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
        rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

        if (item->spec) {
            rule->b_spec = TRUE;
            ipv4_spec =
                (const struct rte_flow_item_ipv4 *)item->spec;
            rule->ixgbe_fdir.formatted.dst_ip[0] =
                ipv4_spec->hdr.dst_addr;
            rule->ixgbe_fdir.formatted.src_ip[0] =
                ipv4_spec->hdr.src_addr;
        }

        /**
         * Check if the next not void item is
         * TCP or UDP or SCTP or RAW or END.
         */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END &&
            item->type != RTE_FLOW_ITEM_TYPE_RAW) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the IPV6 info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_IPV6;

        /**
         * 1. must signature match
         * 2. not support last
         * 3. mask must not be null
         */
        if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
            item->last ||
            !item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        rule->b_mask = TRUE;
        ipv6_mask =
            (const struct rte_flow_item_ipv6 *)item->mask;
        if (ipv6_mask->hdr.vtc_flow ||
            ipv6_mask->hdr.payload_len ||
            ipv6_mask->hdr.proto ||
            ipv6_mask->hdr.hop_limits) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* check src addr mask */
        for (j = 0; j < 16; j++) {
            if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
                rule->mask.src_ipv6_mask |= 1 << j;
            } else if (ipv6_mask->hdr.src_addr[j] != 0) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        /* check dst addr mask */
        for (j = 0; j < 16; j++) {
            if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
                rule->mask.dst_ipv6_mask |= 1 << j;
            } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        if (item->spec) {
            rule->b_spec = TRUE;
            ipv6_spec =
                (const struct rte_flow_item_ipv6 *)item->spec;
            rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
                ipv6_spec->hdr.src_addr, 16);
            rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
                ipv6_spec->hdr.dst_addr, 16);
        }

        /**
         * Check if the next not void item is
         * TCP or UDP or SCTP or RAW or END.
         */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END &&
            item->type != RTE_FLOW_ITEM_TYPE_RAW) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the TCP info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_TCP;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst ports,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                tcp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                tcp_spec->hdr.dst_port;
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the UDP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_UDP;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst ports,
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        udp_mask = (const struct rte_flow_item_udp *)item->mask;
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = udp_mask->hdr.src_port;
        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            udp_spec = (const struct rte_flow_item_udp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                udp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                udp_spec->hdr.dst_port;
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the SCTP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_SCTP;
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* only the x550 family supports the sctp port */
        if (hw->mac.type == ixgbe_mac_X550 ||
            hw->mac.type == ixgbe_mac_X550EM_x ||
            hw->mac.type == ixgbe_mac_X550EM_a) {
            /**
             * Only care about src & dst ports,
             * others should be masked.
             */
            if (!item->mask) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            rule->b_mask = TRUE;
            sctp_mask =
                (const struct rte_flow_item_sctp *)item->mask;
            if (sctp_mask->hdr.tag ||
                sctp_mask->hdr.cksum) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            rule->mask.src_port_mask = sctp_mask->hdr.src_port;
            rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

            if (item->spec) {
                rule->b_spec = TRUE;
                sctp_spec =
                    (const struct rte_flow_item_sctp *)item->spec;
                rule->ixgbe_fdir.formatted.src_port =
                    sctp_spec->hdr.src_port;
                rule->ixgbe_fdir.formatted.dst_port =
                    sctp_spec->hdr.dst_port;
            }
        /* on other devices even the sctp port is not supported */
        } else {
            sctp_mask =
                (const struct rte_flow_item_sctp *)item->mask;
            if (sctp_mask &&
                (sctp_mask->hdr.src_port ||
                 sctp_mask->hdr.dst_port ||
                 sctp_mask->hdr.tag ||
                 sctp_mask->hdr.cksum)) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }
    /* Get the flex byte info */
    if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
        /* not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* mask should not be null */
        if (!item->mask || !item->spec) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        raw_mask = (const struct rte_flow_item_raw *)item->mask;

        /* check mask */
        if (raw_mask->relative != 0x1 ||
            raw_mask->search != 0x1 ||
            raw_mask->reserved != 0x0 ||
            (uint32_t)raw_mask->offset != 0xffffffff ||
            raw_mask->limit != 0xffff ||
            raw_mask->length != 0xffff) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        raw_spec = (const struct rte_flow_item_raw *)item->spec;

        /* check spec */
        if (raw_spec->relative != 0 ||
            raw_spec->search != 0 ||
            raw_spec->reserved != 0 ||
            raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
            raw_spec->offset % 2 ||
            raw_spec->limit != 0 ||
            raw_spec->length != 2 ||
            /* pattern can't be 0xffff */
            (raw_spec->pattern[0] == 0xff &&
             raw_spec->pattern[1] == 0xff)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* check pattern mask */
        if (raw_mask->pattern[0] != 0xff ||
            raw_mask->pattern[1] != 0xff) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        rule->mask.flex_bytes_mask = 0xffff;
        rule->ixgbe_fdir.formatted.flex_bytes =
            (((uint16_t)raw_spec->pattern[1]) << 8) |
            raw_spec->pattern[0];
        rule->flex_bytes_offset = raw_spec->offset;
    }
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		/* check if the next not void item is END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
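
/**
 * Illustrative sketch, not part of the driver: one pattern/action pair
 * that the normal fdir parser above is meant to accept, steering
 * IPv4/UDP packets with destination port 53 to queue 1. The ETH item
 * only describes the protocol stack and the all-zero IPv4 mask leaves
 * the addresses as don't care; port_id and the queue index are
 * application-chosen placeholders, and the exact mask requirements
 * follow the checks in ixgbe_parse_fdir_filter_normal().
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr = { .src_addr = 0, .dst_addr = 0 } };
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = rte_cpu_to_be_16(53) } };
 *	struct rte_flow_item_udp udp_mask = {
 *		.hdr = { .dst_port = 0xFFFF } };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue_act = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_act },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */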
#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
 * and get the flow director filter info as well.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VXLAN.
 * The fifth not void item must be the inner ETH.
 * The next not void item must be VLAN or IPV4; after a VLAN item
 * the next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The fourth not void item must be the inner ETH.
 * The next not void item must be VLAN or IPV4; after a VLAN item
 * the next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * UDP		NULL			NULL
 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * NVGRE	protocol	0x6558	0xFFFF
 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}

	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;

	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			/**
			 * The 3-byte VNI is copied into bytes 1..3 of
			 * tni_vni, then the whole word is converted
			 * from big endian.
			 */
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/*Not supported last point for range*/
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->protocol &&
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver &&
		    nvgre_mask->c_k_s_rsvd0_ver !=
			rte_cpu_to_be_16(0xFFFF)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if (nvgre_mask->tni[0] &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bit field and is kept in the high bytes. */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) &&
			    nvgre_mask->c_k_s_rsvd0_ver) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			if (nvgre_mask->protocol &&
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only support vlan and dst MAC address,
	 * others should be masked.
	 */
	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no vlan, considered as full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}

	/**
	 * Check if the next not void item is vlan or ipv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/*Not supported last point for range*/
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);

		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the VLAN item is omitted, the VLAN is treated as
	 * don't care. Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
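
/**
 * Illustrative sketch, not part of the driver: pattern items matching
 * the VxLAN example in the comment above (VNI 0x003254, inner VLAN TCI
 * 0x2016). Outer ETH/IPV4/UDP items carry no spec/mask; the inner ETH
 * mask must leave the src MAC and EtherType zeroed, per the checks in
 * this parser. The MAC address and variable names are placeholders,
 * and actions are built as in the QUEUE example earlier in this file.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0x01 } };
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_vlan vlan_spec = {
 *		.tci = rte_cpu_to_be_16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = {
 *		.tci = rte_cpu_to_be_16(0xEFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */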
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	/* Try the normal (non-tunnel) parser first, then the tunnel one. */
	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					actions, rule, error);
	if (ret)
		return ret;

step_next:

	/* 82599 cannot drop based on src/dst port matches. */
	if (hw->mac.type == ixgbe_mac_82599EB &&
	    rule->fdirflags == IXGBE_FDIRCMD_DROP &&
	    (rule->ixgbe_fdir.formatted.src_port != 0 ||
	     rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
static int
ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct ixgbe_rte_flow_rss_conf *rss_conf,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_rss *rss;
	uint16_t n;

	/**
	 * RSS only supports forwarding,
	 * check if the first not void action is RSS.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	rss = (const struct rte_flow_action_rss *)act->conf;

	if (!rss || !rss->num) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"no valid queues");
		return -rte_errno;
	}

	for (n = 0; n < rss->num; n++) {
		if (rss->queue[n] >= dev->data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act,
				"queue id > max number of queues");
			return -rte_errno;
		}
	}

	if (rss->rss_conf)
		rss_conf->rss_conf = *rss->rss_conf;
	else
		rss_conf->rss_conf.rss_hf = IXGBE_RSS_OFFLOAD_ALL;

	for (n = 0; n < rss->num; ++n)
		rss_conf->queue[n] = rss->queue[n];
	rss_conf->num = rss->num;

	/* check if the next not void action is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
		return -rte_errno;
	}

	/* parse attr */
	/* must be input direction */
	if (!attr->ingress) {
		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* not supported */
	if (attr->egress) {
		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	if (attr->priority > 0xFFFF) {
		memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Error priority.");
		return -rte_errno;
	}

	return 0;
}
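
/**
 * Illustrative sketch, not part of the driver: building the RSS action
 * this parser accepts. rte_flow_action_rss here ends in a flexible
 * queue[] array, so the caller allocates room for it; a NULL rss_conf
 * makes the parser fall back to IXGBE_RSS_OFFLOAD_ALL. Queue IDs are
 * placeholders and must stay below the port's Rx queue count.
 *
 *	uint16_t queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss *rss;
 *
 *	rss = calloc(1, sizeof(*rss) + sizeof(queues));
 *	rss->rss_conf = NULL;
 *	rss->num = RTE_DIM(queues);
 *	memcpy(rss->queue, queues, sizeof(queues));
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */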
/* remove the rss filter */
static void
ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
{
	struct ixgbe_filter_info *filter_info =
		IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);

	if (filter_info->rss_info.num)
		ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
}
void
ixgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&filter_rss_list);
	TAILQ_INIT(&ixgbe_flow_list);
}
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_rss_conf_ele *rss_filter_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
				 ntuple_filter_ptr,
				 entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
				 ethertype_filter_ptr,
				 entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
				 syn_filter_ptr,
				 entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
				 l2_tn_filter_ptr,
				 entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
				 fdir_rule_ptr,
				 entries);
		rte_free(fdir_rule_ptr);
	}

	while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
		TAILQ_REMOVE(&filter_rss_list,
				 rss_filter_ptr,
				 entries);
		rte_free(rss_filter_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
				 ixgbe_flow_mem_ptr,
				 entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
/**
 * Create a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the filter which it hits first.
 * So, the sequence matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_rte_flow_rss_conf rss_conf;
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_rss_conf_ele *rss_filter_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);

#ifdef RTE_LIBRTE_SECURITY
	/* An ESP flow is not really a flow. */
	if (ntuple_filter.proto == IPPROTO_ESP)
		return flow;
#endif

	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}
	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}
	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}
	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only support one global mask,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR, "failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * Clear the mask_added flag if we fail
				 * to program the filter.
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}
	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}
	memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
	ret = ixgbe_parse_rss_filter(dev, attr,
					actions, &rss_conf, error);
	if (!ret) {
		ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
		if (!ret) {
			rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
				sizeof(struct ixgbe_rss_conf_ele), 0);
			if (!rss_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&rss_filter_ptr->filter_info,
				&rss_conf,
				sizeof(struct ixgbe_rte_flow_rss_conf));
			TAILQ_INSERT_TAIL(&filter_rss_list,
				rss_filter_ptr, entries);
			flow->rule = rss_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_HASH;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
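
/**
 * Illustrative sketch, not part of the driver: the lifecycle an
 * application drives through the ops registered at the bottom of this
 * file. port_id, attr, pattern and actions are application-chosen
 * placeholders (see the pattern/action sketches earlier in this file).
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 *	rte_flow_destroy(port_id, f, &err);	remove one rule, or
 *	rte_flow_flush(port_id, &err);		remove all rules on the port
 */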
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, because there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct ixgbe_rte_flow_rss_conf rss_conf;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);
	if (!ret)
		return 0;

	memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
	ret = ixgbe_parse_rss_filter(dev, attr,
				actions, &rss_conf, error);

	return ret;
}
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct ixgbe_rss_conf_ele *rss_filter_ptr;

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
					pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_HASH:
		rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
					pmd_flow->rule;
		ret = ixgbe_config_rss_filter(dev,
			&rss_filter_ptr->filter_info, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_rss_list,
				rss_filter_ptr, entries);
			rte_free(rss_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			break;
		}
	}
	rte_free(flow);

	return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_clear_rss_filter(dev);
	ixgbe_filterlist_flush();

	return 0;
}
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
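
/**
 * Note: this table is handed back through the device's filter_ctrl
 * callback when RTE_ETH_FILTER_GENERIC is queried, which is how the
 * generic rte_flow API reaches the functions above; the exact dispatch
 * lives in ixgbe_ethdev.c. Applications normally call rte_flow_create()
 * and friends rather than fetching the table directly, but a sketch of
 * the retrieval path looks like:
 *
 *	const struct rte_flow_ops *ops = NULL;
 *
 *	rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *				RTE_ETH_FILTER_GET, &ops);
 */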