/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */
#include <netinet/in.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>

#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62
/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
	TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
	struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
	TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
	struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
	TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
	struct rte_eth_syn_filter filter_info;
};

/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
	TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
	struct ixgbe_fdir_rule filter_info;
};

/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
	TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
	struct rte_eth_l2_tunnel_conf filter_info;
};

/* rss filter list structure */
struct ixgbe_rss_conf_ele {
	TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
	struct ixgbe_rte_flow_rss_conf filter_info;
};

/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
	TAILQ_ENTRY(ixgbe_flow_mem) entries;
	struct rte_flow *flow;
};
TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_rss_filter_list filter_rss_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
/*
 * An endless loop will not occur with the assumptions below:
 * 1. there is at least one not-void item (END)
 * 2. cur is before END.
 */
static
const struct rte_flow_item *next_no_void_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		cur ? cur + 1 : &pattern[0];

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
			return next;
		next++;
	}
}

static
const struct rte_flow_action *next_no_void_action(
		const struct rte_flow_action actions[],
		const struct rte_flow_action *cur)
{
	const struct rte_flow_action *next =
		cur ? cur + 1 : &actions[0];

	while (1) {
		if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
			return next;
		next++;
	}
}
/*
 * Please be aware there is an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
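
/*
 * Illustrative sketch only, not used by the driver: it demonstrates the
 * byte-order convention stated above. The function name and values are
 * assumptions made purely for demonstration.
 */
static __rte_unused void
example_byte_order(struct rte_flow_item_tcp *tcp_spec,
		struct rte_flow_attr *attr)
{
	/* Item fields describe packet bytes, so convert to big endian. */
	tcp_spec->hdr.src_port = rte_cpu_to_be_16(80);
	/* Attribute fields stay in CPU order; no conversion is needed. */
	attr->priority = 1;
}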
/*
 * Parse the rule to see if it is an n-tuple rule,
 * and get the n-tuple filter info along the way.
 * PATTERN:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 *		next_proto_id	17	0xFF
 * UDP/TCP/	src_port	80	0xFFFF
 * SCTP		dst_port	80	0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 *
 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
 */
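
/*
 * Illustrative sketch only, not driver code: how an application might
 * build the n-tuple rule documented above. The port id, addresses,
 * priority and queue index are placeholder assumptions.
 */
static __rte_unused struct rte_flow *
example_create_ntuple_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
		.hdr.next_proto_id = IPPROTO_UDP,
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = 0xFFFFFFFF,
		.hdr.dst_addr = 0xFFFFFFFF,
		.hdr.next_proto_id = 0xFF,
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = 0xFFFF,
		.hdr.dst_port = 0xFFFF,
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}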
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	struct rte_flow_item_eth eth_null;
	struct rte_flow_item_vlan vlan_null;

		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");

	memset(&eth_null, 0, sizeof(struct rte_flow_item_eth));
	memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
#ifdef RTE_LIBRTE_SECURITY
	/*
	 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
		const void *conf = act->conf;
		/* check if the next not void item is END */
		act = next_no_void_action(actions, act);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");

		/* get the IP pattern */
		item = next_no_void_pattern(pattern, NULL);
		while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
				item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			if (item->last ||
					item->type == RTE_FLOW_ITEM_TYPE_END) {
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "IP pattern missing.");
			}
			item = next_no_void_pattern(pattern, item);
		}

		filter->proto = IPPROTO_ESP;
		return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
				item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	}
#endif
	/* the first not void item can be MAC or IPv4 */
	item = next_no_void_pattern(pattern, NULL);

	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		eth_spec = item->spec;
		eth_mask = item->mask;
		/* Not supported last point for range */
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		/* if the first item is MAC, the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(eth_spec, &eth_null,
				sizeof(struct rte_flow_item_eth)) ||
			 memcmp(eth_mask, &eth_null,
				sizeof(struct rte_flow_item_eth)))) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		/* check if the next not void item is IPv4 or VLAN */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		vlan_spec = item->spec;
		vlan_mask = item->mask;
		/* Not supported last point for range */
			rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		/* the content should be NULL */
		if ((item->spec || item->mask) &&
			(memcmp(vlan_spec, &vlan_null,
				sizeof(struct rte_flow_item_vlan)) ||
			 memcmp(vlan_mask, &vlan_null,
				sizeof(struct rte_flow_item_vlan)))) {

			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		/* check if the next not void item is IPv4 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
			rte_flow_error_set(error,
				EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");
	/* get the IPv4 info */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	ipv4_mask = item->mask;
	/*
	 * Only support src & dst addresses, protocol,
	 * others should be masked.
	 */
	if (ipv4_mask->hdr.version_ihl ||
	    ipv4_mask->hdr.type_of_service ||
	    ipv4_mask->hdr.total_length ||
	    ipv4_mask->hdr.packet_id ||
	    ipv4_mask->hdr.fragment_offset ||
	    ipv4_mask->hdr.time_to_live ||
	    ipv4_mask->hdr.hdr_checksum) {
		rte_flow_error_set(error,
			EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
	filter->src_ip_mask = ipv4_mask->hdr.src_addr;
	filter->proto_mask = ipv4_mask->hdr.next_proto_id;

	ipv4_spec = item->spec;
	filter->dst_ip = ipv4_spec->hdr.dst_addr;
	filter->src_ip = ipv4_spec->hdr.src_addr;
	filter->proto = ipv4_spec->hdr.next_proto_id;
	/* check if the next not void item is TCP or UDP */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
	    item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");

	if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
	    (!item->spec && !item->mask)) {

	/* get the TCP/UDP/SCTP info */
	if (item->type != RTE_FLOW_ITEM_TYPE_END &&
	    (!item->spec || !item->mask)) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ntuple mask");

	/* Not supported last point for range */
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		tcp_mask = item->mask;

		/*
		 * Only support src & dst ports, TCP flags,
		 * others should be masked.
		 */
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = tcp_mask->hdr.dst_port;
		filter->src_port_mask = tcp_mask->hdr.src_port;
		if (tcp_mask->hdr.tcp_flags == 0xFF) {
			filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else if (!tcp_mask->hdr.tcp_flags) {
			filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
		} else {
			memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		tcp_spec = item->spec;
		filter->dst_port = tcp_spec->hdr.dst_port;
		filter->src_port = tcp_spec->hdr.src_port;
		filter->tcp_flags = tcp_spec->hdr.tcp_flags;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		udp_mask = item->mask;

		/*
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = udp_mask->hdr.dst_port;
		filter->src_port_mask = udp_mask->hdr.src_port;

		udp_spec = item->spec;
		filter->dst_port = udp_spec->hdr.dst_port;
		filter->src_port = udp_spec->hdr.src_port;
	} else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		sctp_mask = item->mask;

		/*
		 * Only support src & dst ports,
		 * others should be masked.
		 */
		if (sctp_mask->hdr.tag ||
		    sctp_mask->hdr.cksum) {
			memset(filter, 0,
				sizeof(struct rte_eth_ntuple_filter));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by ntuple filter");

		filter->dst_port_mask = sctp_mask->hdr.dst_port;
		filter->src_port_mask = sctp_mask->hdr.src_port;

		sctp_spec = item->spec;
		filter->dst_port = sctp_spec->hdr.dst_port;
		filter->src_port = sctp_spec->hdr.src_port;
	}

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ntuple filter");
	/*
	 * n-tuple only supports forwarding,
	 * check if the first not void action is QUEUE.
	 */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	filter->queue =
		((const struct rte_flow_action_queue *)act->conf)->index;

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");

	if (attr->priority > 0xFFFF) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Error priority.");

	filter->priority = (uint16_t)attr->priority;
	if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
		filter->priority = 1;
/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ntuple_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

	ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);

#ifdef RTE_LIBRTE_SECURITY
	/* An ESP flow is not really a flow */
	if (filter->proto == IPPROTO_ESP)
		return 0;
#endif

	/* Ixgbe doesn't support TCP flags. */
	if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ntuple filter");

	/* Ixgbe doesn't support many priorities. */
	if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
	    filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
		memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Priority not supported by ntuple filter");

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;

	/* fixed value for ixgbe */
	filter->flags = RTE_5TUPLE_FLAGS;
/*
 * Parse the rule to see if it is an ethertype rule,
 * and get the ethertype filter info along the way.
 * PATTERN:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		type	0x0807		0xFFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
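
/*
 * Illustrative sketch only, not driver code: how an application might
 * build the ethertype rule documented above. The port id, ethertype
 * value and queue index are placeholder assumptions.
 */
static __rte_unused struct rte_flow *
example_create_ethertype_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_eth eth_spec = {
		.type = rte_cpu_to_be_16(0x0807),
	};
	struct rte_flow_item_eth eth_mask = {
		.type = 0xFFFF,
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}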
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item *pattern,
			const struct rte_flow_action *actions,
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_action_queue *act_q;

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
	item = next_no_void_pattern(pattern, NULL);
	/* The first non-void item should be MAC. */
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	/* Get the MAC info. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter");

	eth_spec = item->spec;
	eth_mask = item->mask;

	/* Mask bits of source MAC address must be full of 0.
	 * Mask bits of destination MAC address must be full
	 * of 1 or full of 0.
	 */
	if (!is_zero_ether_addr(&eth_mask->src) ||
	    (!is_zero_ether_addr(&eth_mask->dst) &&
	     !is_broadcast_ether_addr(&eth_mask->dst))) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ether address mask");

	if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid ethertype mask");

	/* If mask bits of destination MAC address
	 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
	 */
	if (is_broadcast_ether_addr(&eth_mask->dst)) {
		filter->mac_addr = eth_spec->dst;
		filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
	} else {
		filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
	}

	filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

	/* Check if the next non-void item is END. */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by ethertype filter.");
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->transfer) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");

	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
			attr, "Not support group.");
static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_ethertype_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_ethertype_filter(attr, pattern,
					actions, filter, error);

	/* Ixgbe doesn't support MAC address. */
	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by ethertype filter");

	if (filter->queue >= dev->data->nb_rx_queues) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "queue index much too big");

	if (filter->ether_type == ETHER_TYPE_IPv4 ||
	    filter->ether_type == ETHER_TYPE_IPv6) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "IPv4/IPv6 not supported by ethertype filter");

	if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "mac compare is unsupported");

	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
		memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "drop option is unsupported");
/*
 * Parse the rule to see if it is a TCP SYN rule,
 * and get the TCP SYN filter info along the way.
 * PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4/IPV6	NULL			NULL
 * TCP		tcp_flags	0x02	0xFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
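
/*
 * Illustrative sketch only, not driver code: how an application might
 * build the TCP SYN rule documented above. The port id and queue index
 * are placeholder assumptions. Note the parser below requires the
 * tcp_flags mask to be exactly the SYN bit (0x02), so that value is
 * used for both spec and mask here.
 */
static __rte_unused struct rte_flow *
example_create_syn_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_tcp tcp_spec = {
		.hdr.tcp_flags = 0x02, /* SYN */
	};
	struct rte_flow_item_tcp tcp_mask = {
		.hdr.tcp_flags = 0x02,
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}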
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_action *act;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_action_queue *act_q;

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
	/* the first not void item should be MAC or IPv4 or IPv6 or TCP */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* if the item is MAC, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN address mask");

		/* check if the next not void item is IPv4 or IPv6 */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");

	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* if the item is IP, the content should be NULL */
		if (item->spec || item->mask) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Invalid SYN mask");

		/* check if the next not void item is TCP */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by syn filter");

	/* Get the TCP info. Only support SYN. */
	if (!item->spec || !item->mask) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Invalid SYN mask");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	tcp_spec = item->spec;
	tcp_mask = item->mask;
	if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
	    tcp_mask->hdr.src_port ||
	    tcp_mask->hdr.dst_port ||
	    tcp_mask->hdr.sent_seq ||
	    tcp_mask->hdr.recv_ack ||
	    tcp_mask->hdr.data_off ||
	    tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
	    tcp_mask->hdr.rx_win ||
	    tcp_mask->hdr.cksum ||
	    tcp_mask->hdr.tcp_urp) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by syn filter");
	/* check if the first not void action is QUEUE. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->queue = act_q->index;
	if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");

	/* Support 2 priorities, the lowest or highest. */
	if (!attr->priority) {
		filter->hig_pri = 0;
	} else if (attr->priority == (uint32_t)~0U) {
		filter->hig_pri = 1;
	} else {
		memset(filter, 0, sizeof(struct rte_eth_syn_filter));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");
static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_syn_filter *filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	MAC_TYPE_FILTER_SUP(hw->mac.type);

	ret = cons_parse_syn_filter(attr, pattern,
				actions, filter, error);

	if (filter->queue >= dev->data->nb_rx_queues)
		return -rte_errno;
/*
 * Parse the rule to see if it is an L2 tunnel rule,
 * and get the L2 tunnel filter info along the way.
 * Only E-tag is supported now.
 * PATTERN:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be VF or PF.
 * The next not void action should be END.
 * pattern example:
 * ITEM		Spec			Mask
 * E_TAG	grp		0x2	0x3
 *		e_cid_base	0x309	0xFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
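
/*
 * Illustrative sketch only, not driver code: how an application might
 * build the E-tag rule documented above. The port id and VF id are
 * placeholder assumptions; grp 0x2 and e_cid_base 0x309 are packed into
 * the 14-bit rsvd_grp_ecid_b field.
 */
static __rte_unused struct rte_flow *
example_create_l2_tunnel_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16((0x2 << 12) | 0x309),
	};
	struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
	};
	struct rte_flow_action_vf vf = { .id = 1 }; /* hypothetical VF id */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}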
static int
cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *filter,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_e_tag *e_tag_spec;
	const struct rte_flow_item_e_tag *e_tag_mask;
	const struct rte_flow_action *act;
	const struct rte_flow_action_vf *act_vf;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
	/* The first not void item should be e-tag. */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	if (!item->spec || !item->mask) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	e_tag_spec = item->spec;
	e_tag_mask = item->mask;

	/* Only care about GRP and E cid base. */
	if (e_tag_mask->epcp_edei_in_ecid_b ||
	    e_tag_mask->in_ecid_e ||
	    e_tag_mask->ecid_e ||
	    e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");

	filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
	/*
	 * grp and e_cid_base are bit fields and only use 14 bits.
	 * e-tag id is taken as little endian by HW.
	 */
	filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

	/* check if the next not void item is END */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by L2 tunnel filter");
	/* must be input direction */
	if (!attr->ingress) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->transfer) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");

	if (attr->priority) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");

	/* check if the first not void action is VF or PF. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
	    act->type != RTE_FLOW_ACTION_TYPE_PF) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->pool = act_vf->id;
	} else {
		filter->pool = pci_dev->max_vfs;
	}

	/* check if the next not void item is END */
	act = next_no_void_action(actions, act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_eth_l2_tunnel_conf *l2_tn_filter,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	uint16_t vf_num;

	ret = cons_parse_l2_tn_filter(dev, attr, pattern,
				actions, l2_tn_filter, error);

	if (hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a) {
		memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			NULL, "Not supported by L2 tunnel filter");

	vf_num = pci_dev->max_vfs;

	if (l2_tn_filter->pool > vf_num)
		return -rte_errno;
/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark;

	/* must be input direction */
	if (!attr->ingress) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
			attr, "Only support ingress.");

		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
			attr, "Not support egress.");

	if (attr->transfer) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
			attr, "No support for transfer.");

	if (attr->priority) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
			attr, "Not support priority.");

	/* check if the first not void action is QUEUE or DROP. */
	act = next_no_void_action(actions, NULL);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		rule->queue = act_q->index;
	} else {
		/* signature mode does not support drop action. */
		if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ACTION,
				act, "Not supported action.");

		rule->fdirflags = IXGBE_FDIRCMD_DROP;
	}

	/* check if the next not void item is MARK */
	act = next_no_void_action(actions, act);
	if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
	    (act->type != RTE_FLOW_ACTION_TYPE_END)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark = (const struct rte_flow_action_mark *)act->conf;
		rule->soft_id = mark->id;
		act = next_no_void_action(actions, act);
	}

	/* check if the next not void item is END */
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION,
			act, "Not supported action.");
/* search the next not-void pattern and skip fuzzy items */
static
const struct rte_flow_item *next_no_fuzzy_pattern(
		const struct rte_flow_item pattern[],
		const struct rte_flow_item *cur)
{
	const struct rte_flow_item *next =
		next_no_void_pattern(pattern, cur);

	while (1) {
		if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
			return next;
		next = next_no_void_pattern(pattern, next);
	}
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
	const struct rte_flow_item_fuzzy *spec, *last, *mask;
	const struct rte_flow_item *item;
	uint32_t sh, lh, mh;

		if (item->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
/*
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
 * and get the flow director filter info along the way.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM		Spec			Mask
 * ETH		NULL			NULL
 * IPV4		src_addr 192.168.1.20	0xFFFFFFFF
 *		dst_addr 192.167.3.50	0xFFFFFFFF
 * UDP/TCP/SCTP	src_port	80	0xFFFF
 *		dst_port	80	0xFFFF
 * FLEX	relative	0	0x1
 *	offset		12	0xFFFFFFFF
 *	pattern[0]	0x86	0xFF
 *	pattern[1]	0xDD	0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM		Spec			Mask
 * ETH		dst_addr
		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
 * MAC VLAN	tci	0x2016		0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
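
/*
 * Illustrative sketch only, not driver code: how an application might
 * build the perfect-mode IPv4/UDP flow director rule documented above,
 * with a MARK action carrying a soft id. The port id, addresses, queue
 * index and mark id are placeholder assumptions.
 */
static __rte_unused struct rte_flow *
example_create_fdir_flow(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
		.hdr.dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
	};
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr.src_addr = 0xFFFFFFFF,
		.hdr.dst_addr = 0xFFFFFFFF,
	};
	struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = rte_cpu_to_be_16(80),
		.hdr.dst_port = rte_cpu_to_be_16(80),
	};
	struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = 0xFFFF,
		.hdr.dst_port = 0xFFFF,
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action_mark mark = { .id = 0x1234 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_create(port_id, &attr, pattern, actions, error);
}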
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec;
	const struct rte_flow_item_ipv4 *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec;
	const struct rte_flow_item_ipv6 *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec;
	const struct rte_flow_item_tcp *tcp_mask;
	const struct rte_flow_item_udp *udp_spec;
	const struct rte_flow_item_udp *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec;
	const struct rte_flow_item_sctp *sctp_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	const struct rte_flow_item_raw *raw_mask;
	const struct rte_flow_item_raw *raw_spec;
	uint8_t j;

	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");

		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");

	/*
	 * Some fields may not be provided. Set spec to 0 and mask to the
	 * default value. Then we need not do anything later for the fields
	 * that are not provided.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	rule->mask.flex_bytes_mask = 0;
	/*
	 * The first not void item should be
	 * MAC or IPv4 or TCP or UDP or SCTP.
	 */
	item = next_no_fuzzy_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_TCP &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");

	if (signature_match(pattern))
		rule->mode = RTE_FDIR_MODE_SIGNATURE;
	else
		rule->mode = RTE_FDIR_MODE_PERFECT;

	/* Not supported last point for range */
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");

	/* Get the MAC info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/*
		 * Only support vlan and dst MAC address,
		 * others should be masked.
		 */
		if (item->spec && !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

			rule->b_spec = TRUE;
			eth_spec = item->spec;

			/* Get the dst MAC. */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				rule->ixgbe_fdir.formatted.inner_mac[j] =
					eth_spec->dst.addr_bytes[j];
			}

			rule->b_mask = TRUE;
			eth_mask = item->mask;

			/* Ether type should be masked. */
			if (eth_mask->type ||
			    rule->mode == RTE_FDIR_MODE_SIGNATURE) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

			/* If ethernet has meaning, it means MAC VLAN mode. */
			rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

			/*
			 * The src MAC address must be masked,
			 * and the dst MAC address mask is not supported.
			 */
			for (j = 0; j < ETHER_ADDR_LEN; j++) {
				if (eth_mask->src.addr_bytes[j] ||
				    eth_mask->dst.addr_bytes[j] != 0xFF) {
					memset(rule, 0,
						sizeof(struct ixgbe_fdir_rule));
					rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ITEM,
						item, "Not supported by fdir filter");

			/* When there is no VLAN, it is considered a full mask. */
			rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
		/*
		 * If both spec and mask are NULL, it means
		 * we don't care about ETH. Do nothing.
		 */

	/*
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_fuzzy_pattern(pattern, item);
	if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	} else {
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		vlan_spec = item->spec;
		vlan_mask = item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one tag is not supported. */

		/* Next not void item must be END */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	/* Get the IPV4 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
		/*
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV4;
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		/*
		 * Only care about src & dst addresses,
		 * others should be masked.
		 */
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->b_mask = TRUE;
		ipv4_mask = item->mask;
		if (ipv4_mask->hdr.version_ihl ||
		    ipv4_mask->hdr.type_of_service ||
		    ipv4_mask->hdr.total_length ||
		    ipv4_mask->hdr.packet_id ||
		    ipv4_mask->hdr.fragment_offset ||
		    ipv4_mask->hdr.time_to_live ||
		    ipv4_mask->hdr.next_proto_id ||
		    ipv4_mask->hdr.hdr_checksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
		rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

			rule->b_spec = TRUE;
			ipv4_spec = item->spec;
			rule->ixgbe_fdir.formatted.dst_ip[0] =
				ipv4_spec->hdr.dst_addr;
			rule->ixgbe_fdir.formatted.src_ip[0] =
				ipv4_spec->hdr.src_addr;

		/*
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	/* Get the IPV6 info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/*
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type =
			IXGBE_ATR_FLOW_TYPE_IPV6;

		/*
		 * 1. must be a signature match
		 * 2. 'last' is not supported
		 * 3. mask must not be NULL
		 */
		if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
		    item->last ||
		    !item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		rule->b_mask = TRUE;
		ipv6_mask = item->mask;
		if (ipv6_mask->hdr.vtc_flow ||
		    ipv6_mask->hdr.payload_len ||
		    ipv6_mask->hdr.proto ||
		    ipv6_mask->hdr.hop_limits) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		/* check src addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
				rule->mask.src_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.src_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

		/* check dst addr mask */
		for (j = 0; j < 16; j++) {
			if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
				rule->mask.dst_ipv6_mask |= 1 << j;
			} else if (ipv6_mask->hdr.dst_addr[j] != 0) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

			rule->b_spec = TRUE;
			ipv6_spec = item->spec;
			rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);

		/*
		 * Check if the next not void item is
		 * TCP or UDP or SCTP or END.
		 */
		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
		    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
		    item->type != RTE_FLOW_ITEM_TYPE_END &&
		    item->type != RTE_FLOW_ITEM_TYPE_RAW) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	/* Get the TCP info. */
	if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
		/*
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_TCP;
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		/*
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->b_mask = TRUE;
		tcp_mask = item->mask;
		if (tcp_mask->hdr.sent_seq ||
		    tcp_mask->hdr.recv_ack ||
		    tcp_mask->hdr.data_off ||
		    tcp_mask->hdr.tcp_flags ||
		    tcp_mask->hdr.rx_win ||
		    tcp_mask->hdr.cksum ||
		    tcp_mask->hdr.tcp_urp) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->mask.src_port_mask = tcp_mask->hdr.src_port;
		rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

			rule->b_spec = TRUE;
			tcp_spec = item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				tcp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				tcp_spec->hdr.dst_port;

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	/* Get the UDP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/*
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_UDP;
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
		/*
		 * Only care about src & dst ports,
		 * others should be masked.
		 */
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->b_mask = TRUE;
		udp_mask = item->mask;
		if (udp_mask->hdr.dgram_len ||
		    udp_mask->hdr.dgram_cksum) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");

		rule->mask.src_port_mask = udp_mask->hdr.src_port;
		rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

			rule->b_spec = TRUE;
			udp_spec = item->spec;
			rule->ixgbe_fdir.formatted.src_port =
				udp_spec->hdr.src_port;
			rule->ixgbe_fdir.formatted.dst_port =
				udp_spec->hdr.dst_port;

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
	/* Get the SCTP info */
	if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
		/*
		 * Set the flow type even if there's no content
		 * as we must have a flow type.
		 */
		rule->ixgbe_fdir.formatted.flow_type |=
			IXGBE_ATR_L4TYPE_SCTP;
		/* Not supported last point for range */
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");

		/* only the x550 family supports the SCTP port */
		if (hw->mac.type == ixgbe_mac_X550 ||
		    hw->mac.type == ixgbe_mac_X550EM_x ||
		    hw->mac.type == ixgbe_mac_X550EM_a) {
			/*
			 * Only care about src & dst ports,
			 * others should be masked.
			 */
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

			rule->b_mask = TRUE;
			sctp_mask = item->mask;
			if (sctp_mask->hdr.tag ||
			    sctp_mask->hdr.cksum) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

			rule->mask.src_port_mask = sctp_mask->hdr.src_port;
			rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

				rule->b_spec = TRUE;
				sctp_spec = item->spec;
				rule->ixgbe_fdir.formatted.src_port =
					sctp_spec->hdr.src_port;
				rule->ixgbe_fdir.formatted.dst_port =
					sctp_spec->hdr.dst_port;
		/* on other types, even the SCTP port is not supported */
		} else {
			sctp_mask = item->mask;
			if (sctp_mask &&
			    (sctp_mask->hdr.src_port ||
			     sctp_mask->hdr.dst_port ||
			     sctp_mask->hdr.tag ||
			     sctp_mask->hdr.cksum)) {
				memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");

		item = next_no_fuzzy_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
		    item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
2156 /* Get the flex byte info */
2157 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2158 /* Not supported last point for range*/
2160 rte_flow_error_set(error, EINVAL,
2161 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2162 item, "Not supported last point for range");
2165 /* mask should not be null */
2166 if (!item->mask || !item->spec) {
2167 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2168 rte_flow_error_set(error, EINVAL,
2169 RTE_FLOW_ERROR_TYPE_ITEM,
2170 item, "Not supported by fdir filter");
2174 raw_mask = item->mask;
2177 if (raw_mask->relative != 0x1 ||
2178 raw_mask->search != 0x1 ||
2179 raw_mask->reserved != 0x0 ||
2180 (uint32_t)raw_mask->offset != 0xffffffff ||
2181 raw_mask->limit != 0xffff ||
2182 raw_mask->length != 0xffff) {
2183 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2184 rte_flow_error_set(error, EINVAL,
2185 RTE_FLOW_ERROR_TYPE_ITEM,
2186 item, "Not supported by fdir filter");
2190 raw_spec = item->spec;
2193 if (raw_spec->relative != 0 ||
2194 raw_spec->search != 0 ||
2195 raw_spec->reserved != 0 ||
2196 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2197 raw_spec->offset % 2 ||
2198 raw_spec->limit != 0 ||
2199 raw_spec->length != 2 ||
2200 /* pattern can't be 0xffff */
2201 (raw_spec->pattern[0] == 0xff &&
2202 raw_spec->pattern[1] == 0xff)) {
2203 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204 rte_flow_error_set(error, EINVAL,
2205 RTE_FLOW_ERROR_TYPE_ITEM,
2206 item, "Not supported by fdir filter");
2210 /* check pattern mask */
2211 if (raw_mask->pattern[0] != 0xff ||
2212 raw_mask->pattern[1] != 0xff) {
2213 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2214 rte_flow_error_set(error, EINVAL,
2215 RTE_FLOW_ERROR_TYPE_ITEM,
2216 item, "Not supported by fdir filter");
2220 rule->mask.flex_bytes_mask = 0xffff;
2221 rule->ixgbe_fdir.formatted.flex_bytes =
2222 (((uint16_t)raw_spec->pattern[1]) << 8) |
2223 raw_spec->pattern[0];
2224 rule->flex_bytes_offset = raw_spec->offset;
2225 }
2227 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2228 /* check if the next not void item is END */
2229 item = next_no_fuzzy_pattern(pattern, item);
2230 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2231 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2232 rte_flow_error_set(error, EINVAL,
2233 RTE_FLOW_ERROR_TYPE_ITEM,
2234 item, "Not supported by fdir filter");
2239 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2240 }
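/*
 * Illustrative sketch (not part of the driver): how an application could
 * express the flex-byte match parsed above. All values here are
 * hypothetical. Per the checks above, the spec offset must be even and at
 * most IXGBE_MAX_FLX_SOURCE_OFF, the pattern is exactly two bytes and must
 * be fully masked, and relative/search/limit are zero in the spec while
 * fully set in the mask.
 *
 *	static const uint8_t flex_pattern[2] = { 0xAB, 0xCD };
 *	static const uint8_t flex_pattern_mask[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .reserved = 0,
 *		.offset = 12,	// even and <= IXGBE_MAX_FLX_SOURCE_OFF
 *		.limit = 0, .length = 2,
 *		.pattern = flex_pattern,
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1,
 *		.offset = 0xffffffff, .limit = 0xffff, .length = 0xffff,
 *		.pattern = flex_pattern_mask,
 *	};
 *	struct rte_flow_item raw_item = {
 *		.type = RTE_FLOW_ITEM_TYPE_RAW,
 *		.spec = &raw_spec, .mask = &raw_mask,
 *	};
 */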
2242 #define NVGRE_PROTOCOL 0x6558
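/*
 * Illustrative sketch (not part of the driver): the VxLAN layout documented
 * below, written as a testpmd flow command. The port, queue index and MAC
 * address are hypothetical.
 *
 *	flow create 0 ingress pattern eth / ipv4 / udp /
 *		vxlan vni is 0x3254 / eth dst is 06:05:04:03:02:01 /
 *		vlan tci is 0x2016 / end actions queue index 3 / end
 */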
2244 /**
2245 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2246 * and get the flow director filter info along the way.
2247 * VxLAN PATTERN:
2248 * The first not void item must be ETH.
2249 * The second not void item must be IPV4/ IPV6.
2250 * The third not void item must be UDP.
2251 * The fourth not void item must be VxLAN.
2252 * The next not void item must be END.
2253 * NVGRE PATTERN:
2254 * The first not void item must be ETH.
2255 * The second not void item must be IPV4/ IPV6.
2256 * The third not void item must be NVGRE.
2257 * The next not void item must be END.
2258 * ACTION:
2259 * The first not void action should be QUEUE or DROP.
2260 * The second not void optional action should be MARK; mark_id is a uint32_t.
2261 * The next not void action should be END.
2262 * VxLAN pattern example:
2263 * ITEM		Spec			Mask
2264 * ETH		NULL			NULL
2265 * IPV4/IPV6	NULL			NULL
2266 * UDP		NULL			NULL
2267 * VxLAN	vni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
2268 * MAC VLAN	tci	0x2016		0xEFFF
2269 * END
2270 * NVGRE pattern example:
2271 * ITEM		Spec			Mask
2272 * ETH		NULL			NULL
2273 * IPV4/IPV6	NULL			NULL
2274 * NVGRE	protocol	0x6558	0xFFFF
2275 *		tni{0x00, 0x32, 0x54}	{0xFF, 0xFF, 0xFF}
2276 * MAC VLAN	tci	0x2016		0xEFFF
2277 * END
2278 * All other members in mask and spec should be set to 0x00.
2279 * item->last should be NULL.
2280 */
2281 static int
2282 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2283 const struct rte_flow_item pattern[],
2284 const struct rte_flow_action actions[],
2285 struct ixgbe_fdir_rule *rule,
2286 struct rte_flow_error *error)
2288 const struct rte_flow_item *item;
2289 const struct rte_flow_item_vxlan *vxlan_spec;
2290 const struct rte_flow_item_vxlan *vxlan_mask;
2291 const struct rte_flow_item_nvgre *nvgre_spec;
2292 const struct rte_flow_item_nvgre *nvgre_mask;
2293 const struct rte_flow_item_eth *eth_spec;
2294 const struct rte_flow_item_eth *eth_mask;
2295 const struct rte_flow_item_vlan *vlan_spec;
2296 const struct rte_flow_item_vlan *vlan_mask;
2297 uint32_t j;
2299 if (!pattern) {
2300 rte_flow_error_set(error, EINVAL,
2301 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2302 NULL, "NULL pattern.");
2303 return -rte_errno;
2304 }
2306 if (!actions) {
2307 rte_flow_error_set(error, EINVAL,
2308 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2309 NULL, "NULL action.");
2310 return -rte_errno;
2311 }
2313 if (!attr) {
2314 rte_flow_error_set(error, EINVAL,
2315 RTE_FLOW_ERROR_TYPE_ATTR,
2316 NULL, "NULL attribute.");
2317 return -rte_errno;
2318 }
2320 /*
2321 * Some fields may not be provided. Set spec to 0 and mask to default
2322 * value, so we need not do anything for the not provided fields later.
2323 */
2324 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2325 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2326 rule->mask.vlan_tci_mask = 0;
2328 /*
2329 * The first not void item should be
2330 * ETH, IPv4, IPv6, UDP, VxLAN or NVGRE.
2331 */
2332 item = next_no_void_pattern(pattern, NULL);
2333 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2334 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2335 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2336 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2337 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2338 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2339 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2340 rte_flow_error_set(error, EINVAL,
2341 RTE_FLOW_ERROR_TYPE_ITEM,
2342 item, "Not supported by fdir filter");
2346 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2349 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2350 /* Only used to describe the protocol stack. */
2351 if (item->spec || item->mask) {
2352 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2353 rte_flow_error_set(error, EINVAL,
2354 RTE_FLOW_ERROR_TYPE_ITEM,
2355 item, "Not supported by fdir filter");
2358 /* Not supported last point for range */
2359 if (item->last) {
2360 rte_flow_error_set(error, EINVAL,
2361 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2362 item, "Not supported last point for range");
2363 return -rte_errno;
2364 }
2366 /* Check if the next not void item is IPv4 or IPv6. */
2367 item = next_no_void_pattern(pattern, item);
2368 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2369 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2370 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2371 rte_flow_error_set(error, EINVAL,
2372 RTE_FLOW_ERROR_TYPE_ITEM,
2373 item, "Not supported by fdir filter");
2379 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2380 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2381 /* Only used to describe the protocol stack. */
2382 if (item->spec || item->mask) {
2383 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2384 rte_flow_error_set(error, EINVAL,
2385 RTE_FLOW_ERROR_TYPE_ITEM,
2386 item, "Not supported by fdir filter");
2389 /* Not supported last point for range */
2390 if (item->last) {
2391 rte_flow_error_set(error, EINVAL,
2392 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2393 item, "Not supported last point for range");
2394 return -rte_errno;
2395 }
2397 /* Check if the next not void item is UDP or NVGRE. */
2398 item = next_no_void_pattern(pattern, item);
2399 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2400 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2401 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2402 rte_flow_error_set(error, EINVAL,
2403 RTE_FLOW_ERROR_TYPE_ITEM,
2404 item, "Not supported by fdir filter");
2410 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2411 /* Only used to describe the protocol stack. */
2412 if (item->spec || item->mask) {
2413 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2414 rte_flow_error_set(error, EINVAL,
2415 RTE_FLOW_ERROR_TYPE_ITEM,
2416 item, "Not supported by fdir filter");
2419 /* Not supported last point for range */
2420 if (item->last) {
2421 rte_flow_error_set(error, EINVAL,
2422 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2423 item, "Not supported last point for range");
2424 return -rte_errno;
2425 }
2427 /* Check if the next not void item is VxLAN. */
2428 item = next_no_void_pattern(pattern, item);
2429 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2430 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2431 rte_flow_error_set(error, EINVAL,
2432 RTE_FLOW_ERROR_TYPE_ITEM,
2433 item, "Not supported by fdir filter");
2438 /* Get the VxLAN info */
2439 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2440 rule->ixgbe_fdir.formatted.tunnel_type =
2441 RTE_FDIR_TUNNEL_TYPE_VXLAN;
2443 /* Only care about VNI, others should be masked. */
2444 if (!item->mask) {
2445 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2446 rte_flow_error_set(error, EINVAL,
2447 RTE_FLOW_ERROR_TYPE_ITEM,
2448 item, "Not supported by fdir filter");
2449 return -rte_errno;
2450 }
2451 /* Not supported last point for range */
2452 if (item->last) {
2453 rte_flow_error_set(error, EINVAL,
2454 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2455 item, "Not supported last point for range");
2456 return -rte_errno;
2457 }
2458 rule->b_mask = TRUE;
2460 /* Tunnel type is always meaningful. */
2461 rule->mask.tunnel_type_mask = 1;
2463 vxlan_mask = item->mask;
2464 if (vxlan_mask->flags) {
2465 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2466 rte_flow_error_set(error, EINVAL,
2467 RTE_FLOW_ERROR_TYPE_ITEM,
2468 item, "Not supported by fdir filter");
2471 /* VNI must be either fully masked or not masked at all. */
2472 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2473 vxlan_mask->vni[2]) &&
2474 ((vxlan_mask->vni[0] != 0xFF) ||
2475 (vxlan_mask->vni[1] != 0xFF) ||
2476 (vxlan_mask->vni[2] != 0xFF))) {
2477 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2478 rte_flow_error_set(error, EINVAL,
2479 RTE_FLOW_ERROR_TYPE_ITEM,
2480 item, "Not supported by fdir filter");
2484 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2485 RTE_DIM(vxlan_mask->vni));
2487 if (item->spec) {
2488 rule->b_spec = TRUE;
2489 vxlan_spec = item->spec;
2490 rte_memcpy(((uint8_t *)
2491 &rule->ixgbe_fdir.formatted.tni_vni + 1),
2492 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2493 rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
2494 rule->ixgbe_fdir.formatted.tni_vni);
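/*
 * Byte-layout note (a sketch of what the two lines above do): the three
 * VNI bytes land in bytes 1..3 of the 32-bit tni_vni, and the byte swap
 * right-aligns them, so on a little-endian host vni = {0x00, 0x32, 0x54}
 * yields tni_vni == 0x3254.
 */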
2498 /* Get the NVGRE info */
2499 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2500 rule->ixgbe_fdir.formatted.tunnel_type =
2501 RTE_FDIR_TUNNEL_TYPE_NVGRE;
2503 /*
2504 * Only care about flags0, flags1, protocol and TNI,
2505 * others should be masked.
2506 */
2507 if (!item->mask) {
2508 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2509 rte_flow_error_set(error, EINVAL,
2510 RTE_FLOW_ERROR_TYPE_ITEM,
2511 item, "Not supported by fdir filter");
2512 return -rte_errno;
2513 }
2514 /* Not supported last point for range */
2515 if (item->last) {
2516 rte_flow_error_set(error, EINVAL,
2517 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2518 item, "Not supported last point for range");
2519 return -rte_errno;
2520 }
2521 rule->b_mask = TRUE;
2523 /* Tunnel type is always meaningful. */
2524 rule->mask.tunnel_type_mask = 1;
2526 nvgre_mask = item->mask;
2527 if (nvgre_mask->flow_id) {
2528 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2529 rte_flow_error_set(error, EINVAL,
2530 RTE_FLOW_ERROR_TYPE_ITEM,
2531 item, "Not supported by fdir filter");
2534 if (nvgre_mask->protocol &&
2535 nvgre_mask->protocol != 0xFFFF) {
2536 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2537 rte_flow_error_set(error, EINVAL,
2538 RTE_FLOW_ERROR_TYPE_ITEM,
2539 item, "Not supported by fdir filter");
2542 if (nvgre_mask->c_k_s_rsvd0_ver &&
2543 nvgre_mask->c_k_s_rsvd0_ver !=
2544 rte_cpu_to_be_16(0xFFFF)) {
2545 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2546 rte_flow_error_set(error, EINVAL,
2547 RTE_FLOW_ERROR_TYPE_ITEM,
2548 item, "Not supported by fdir filter");
2551 /* TNI must be either fully masked or not masked at all. */
2552 if (nvgre_mask->tni[0] &&
2553 ((nvgre_mask->tni[0] != 0xFF) ||
2554 (nvgre_mask->tni[1] != 0xFF) ||
2555 (nvgre_mask->tni[2] != 0xFF))) {
2556 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2557 rte_flow_error_set(error, EINVAL,
2558 RTE_FLOW_ERROR_TYPE_ITEM,
2559 item, "Not supported by fdir filter");
2562 /* TNI is a 24-bit field. */
2563 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2564 RTE_DIM(nvgre_mask->tni));
2565 rule->mask.tunnel_id_mask <<= 8;
2567 if (item->spec) {
2568 rule->b_spec = TRUE;
2569 nvgre_spec = item->spec;
2570 if (nvgre_spec->c_k_s_rsvd0_ver !=
2571 rte_cpu_to_be_16(0x2000) &&
2572 nvgre_mask->c_k_s_rsvd0_ver) {
2573 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2574 rte_flow_error_set(error, EINVAL,
2575 RTE_FLOW_ERROR_TYPE_ITEM,
2576 item, "Not supported by fdir filter");
2579 if (nvgre_mask->protocol &&
2580 nvgre_spec->protocol !=
2581 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2582 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2583 rte_flow_error_set(error, EINVAL,
2584 RTE_FLOW_ERROR_TYPE_ITEM,
2585 item, "Not supported by fdir filter");
2588 /* TNI is a 24-bit field. */
2589 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2590 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2591 rule->ixgbe_fdir.formatted.tni_vni <<= 8;
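/*
 * Byte-layout note (a sketch of what the two lines above do): unlike the
 * VxLAN path, the TNI bytes are copied into the low-order bytes and then
 * shifted up by one byte, so on a little-endian host
 * tni = {0x00, 0x32, 0x54} yields tni_vni == 0x54320000.
 */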
2595 /* check if the next not void item is MAC */
2596 item = next_no_void_pattern(pattern, item);
2597 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2598 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2599 rte_flow_error_set(error, EINVAL,
2600 RTE_FLOW_ERROR_TYPE_ITEM,
2601 item, "Not supported by fdir filter");
2605 /*
2606 * Only support vlan and dst MAC address,
2607 * others should be masked.
2608 */
2610 if (!item->mask) {
2611 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2612 rte_flow_error_set(error, EINVAL,
2613 RTE_FLOW_ERROR_TYPE_ITEM,
2614 item, "Not supported by fdir filter");
2615 return -rte_errno;
2616 }
2617 /* Not supported last point for range */
2618 if (item->last) {
2619 rte_flow_error_set(error, EINVAL,
2620 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2621 item, "Not supported last point for range");
2622 return -rte_errno;
2623 }
2624 rule->b_mask = TRUE;
2625 eth_mask = item->mask;
2627 /* Ether type should be masked. */
2628 if (eth_mask->type) {
2629 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2630 rte_flow_error_set(error, EINVAL,
2631 RTE_FLOW_ERROR_TYPE_ITEM,
2632 item, "Not supported by fdir filter");
2636 /* src MAC address should be masked. */
2637 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2638 if (eth_mask->src.addr_bytes[j]) {
2639 memset(rule, 0,
2640 sizeof(struct ixgbe_fdir_rule));
2641 rte_flow_error_set(error, EINVAL,
2642 RTE_FLOW_ERROR_TYPE_ITEM,
2643 item, "Not supported by fdir filter");
2647 rule->mask.mac_addr_byte_mask = 0;
2648 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2649 /* Per-byte mask: e.g. a dst mask of ff:ff:00:00:00:00 yields mac_addr_byte_mask == 0x03. */
2650 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2651 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2652 } else if (eth_mask->dst.addr_bytes[j]) {
2653 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2654 rte_flow_error_set(error, EINVAL,
2655 RTE_FLOW_ERROR_TYPE_ITEM,
2656 item, "Not supported by fdir filter");
2661 /* When there is no VLAN item, the TCI is treated as fully masked. */
2662 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2664 if (item->spec) {
2665 rule->b_spec = TRUE;
2666 eth_spec = item->spec;
2668 /* Get the dst MAC. */
2669 for (j = 0; j < ETHER_ADDR_LEN; j++) {
2670 rule->ixgbe_fdir.formatted.inner_mac[j] =
2671 eth_spec->dst.addr_bytes[j];
2675 /*
2676 * Check if the next not void item is VLAN or IPv4.
2677 * IPv6 is not supported.
2678 */
2679 item = next_no_void_pattern(pattern, item);
2680 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2681 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2682 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2683 rte_flow_error_set(error, EINVAL,
2684 RTE_FLOW_ERROR_TYPE_ITEM,
2685 item, "Not supported by fdir filter");
2688 /* Not supported last point for range */
2689 if (item->last) {
2690 rte_flow_error_set(error, EINVAL,
2691 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2692 item, "Not supported last point for range");
2693 return -rte_errno;
2694 }
2696 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2697 if (!(item->spec && item->mask)) {
2698 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2699 rte_flow_error_set(error, EINVAL,
2700 RTE_FLOW_ERROR_TYPE_ITEM,
2701 item, "Not supported by fdir filter");
2705 vlan_spec = item->spec;
2706 vlan_mask = item->mask;
2708 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2710 rule->mask.vlan_tci_mask = vlan_mask->tci;
2711 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2712 /* More than one VLAN tag is not supported. */
2714 /* check if the next not void item is END */
2715 item = next_no_void_pattern(pattern, item);
2717 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2718 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2719 rte_flow_error_set(error, EINVAL,
2720 RTE_FLOW_ERROR_TYPE_ITEM,
2721 item, "Not supported by fdir filter");
2726 /*
2727 * If the TCI mask is 0, the VLAN is a "don't care";
2728 * nothing needs to be done in that case.
2729 */
2731 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2732 }
2734 static int
2735 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2736 const struct rte_flow_attr *attr,
2737 const struct rte_flow_item pattern[],
2738 const struct rte_flow_action actions[],
2739 struct ixgbe_fdir_rule *rule,
2740 struct rte_flow_error *error)
2743 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2744 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2746 if (hw->mac.type != ixgbe_mac_82599EB &&
2747 hw->mac.type != ixgbe_mac_X540 &&
2748 hw->mac.type != ixgbe_mac_X550 &&
2749 hw->mac.type != ixgbe_mac_X550EM_x &&
2750 hw->mac.type != ixgbe_mac_X550EM_a)
2751 return -ENOTSUP;
2753 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2754 actions, rule, error);
2759 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2760 actions, rule, error);
2767 if (hw->mac.type == ixgbe_mac_82599EB &&
2768 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2769 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2770 rule->ixgbe_fdir.formatted.dst_port != 0))
2771 return -ENOTSUP;
2773 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2774 fdir_mode != rule->mode)
2775 return -ENOTSUP;
2777 if (rule->queue >= dev->data->nb_rx_queues)
2778 return -ENOTSUP;
2780 return ret;
2781 }
2783 static int
2784 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2785 const struct rte_flow_attr *attr,
2786 const struct rte_flow_action actions[],
2787 struct ixgbe_rte_flow_rss_conf *rss_conf,
2788 struct rte_flow_error *error)
2790 const struct rte_flow_action *act;
2791 const struct rte_flow_action_rss *rss;
2792 uint16_t n;
2794 /*
2795 * RSS only supports forwarding,
2796 * so check if the first not void action is RSS.
2797 */
2798 act = next_no_void_action(actions, NULL);
2799 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2800 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2801 rte_flow_error_set(error, EINVAL,
2802 RTE_FLOW_ERROR_TYPE_ACTION,
2803 act, "Not supported action.");
2807 rss = (const struct rte_flow_action_rss *)act->conf;
2809 if (!rss || !rss->queue_num) {
2810 rte_flow_error_set(error, EINVAL,
2811 RTE_FLOW_ERROR_TYPE_ACTION,
2812 act, "no valid queues");
2813 return -rte_errno;
2814 }
2817 for (n = 0; n < rss->queue_num; n++) {
2818 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2819 rte_flow_error_set(error, EINVAL,
2820 RTE_FLOW_ERROR_TYPE_ACTION,
2822 "queue id > max number of queues");
2827 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2828 return rte_flow_error_set
2829 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2830 "non-default RSS hash functions are not supported");
2831 if (rss->level)
2832 return rte_flow_error_set
2833 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2834 "a nonzero RSS encapsulation level is not supported");
2835 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2836 return rte_flow_error_set
2837 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2838 "RSS hash key must be exactly 40 bytes");
2839 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2840 return rte_flow_error_set
2841 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2842 "too many queues for RSS context");
2843 if (ixgbe_rss_conf_init(rss_conf, rss))
2844 return rte_flow_error_set
2845 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2846 "RSS context initialization failure");
2848 /* check if the next not void action is END */
2849 act = next_no_void_action(actions, act);
2850 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2851 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2852 rte_flow_error_set(error, EINVAL,
2853 RTE_FLOW_ERROR_TYPE_ACTION,
2854 act, "Not supported action.");
2859 /* must be input direction */
2860 if (!attr->ingress) {
2861 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2862 rte_flow_error_set(error, EINVAL,
2863 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2864 attr, "Only support ingress.");
2868 /* not supported */
2869 if (attr->egress) {
2870 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2871 rte_flow_error_set(error, EINVAL,
2872 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2873 attr, "Not support egress.");
2878 if (attr->transfer) {
2879 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2880 rte_flow_error_set(error, EINVAL,
2881 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2882 attr, "No support for transfer.");
2886 if (attr->priority > 0xFFFF) {
2887 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2888 rte_flow_error_set(error, EINVAL,
2889 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2890 attr, "Error priority.");
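/*
 * Illustrative sketch (not part of the driver): an RSS action this parser
 * accepts. Queue indices and RSS types are hypothetical; when a key is
 * given it must be exactly 40 bytes, and only the default hash function
 * and encapsulation level 0 are supported.
 *
 *	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss_act = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,	// 0 selects the default key
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss_act },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */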
2897 /* remove the rss filter */
2898 static void
2899 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2901 struct ixgbe_filter_info *filter_info =
2902 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2904 if (filter_info->rss_info.conf.queue_num)
2905 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2908 void
2909 ixgbe_filterlist_init(void)
2911 TAILQ_INIT(&filter_ntuple_list);
2912 TAILQ_INIT(&filter_ethertype_list);
2913 TAILQ_INIT(&filter_syn_list);
2914 TAILQ_INIT(&filter_fdir_list);
2915 TAILQ_INIT(&filter_l2_tunnel_list);
2916 TAILQ_INIT(&filter_rss_list);
2917 TAILQ_INIT(&ixgbe_flow_list);
2920 void
2921 ixgbe_filterlist_flush(void)
2923 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2924 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2925 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2926 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2927 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2928 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2929 struct ixgbe_rss_conf_ele *rss_filter_ptr;
2931 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2932 TAILQ_REMOVE(&filter_ntuple_list,
2933 ntuple_filter_ptr, entries);
2935 rte_free(ntuple_filter_ptr);
2938 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2939 TAILQ_REMOVE(&filter_ethertype_list,
2940 ethertype_filter_ptr,
2941 entries);
2942 rte_free(ethertype_filter_ptr);
2945 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2946 TAILQ_REMOVE(&filter_syn_list,
2947 syn_filter_ptr, entries);
2949 rte_free(syn_filter_ptr);
2952 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2953 TAILQ_REMOVE(&filter_l2_tunnel_list,
2954 l2_tn_filter_ptr, entries);
2956 rte_free(l2_tn_filter_ptr);
2959 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2960 TAILQ_REMOVE(&filter_fdir_list,
2961 fdir_rule_ptr, entries);
2963 rte_free(fdir_rule_ptr);
2966 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2967 TAILQ_REMOVE(&filter_rss_list,
2968 rss_filter_ptr, entries);
2970 rte_free(rss_filter_ptr);
2973 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2974 TAILQ_REMOVE(&ixgbe_flow_list,
2975 ixgbe_flow_mem_ptr, entries);
2977 rte_free(ixgbe_flow_mem_ptr->flow);
2978 rte_free(ixgbe_flow_mem_ptr);
2982 /**
2983 * Create or destroy a flow rule.
2984 * Theoretically one rule can match more than one filter.
2985 * We will let it use the filter which it hits first,
2986 * so the sequence matters.
2987 */
2988 static struct rte_flow *
2989 ixgbe_flow_create(struct rte_eth_dev *dev,
2990 const struct rte_flow_attr *attr,
2991 const struct rte_flow_item pattern[],
2992 const struct rte_flow_action actions[],
2993 struct rte_flow_error *error)
2996 struct rte_eth_ntuple_filter ntuple_filter;
2997 struct rte_eth_ethertype_filter ethertype_filter;
2998 struct rte_eth_syn_filter syn_filter;
2999 struct ixgbe_fdir_rule fdir_rule;
3000 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3001 struct ixgbe_hw_fdir_info *fdir_info =
3002 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3003 struct ixgbe_rte_flow_rss_conf rss_conf;
3004 struct rte_flow *flow = NULL;
3005 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3006 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3007 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3008 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3009 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3010 struct ixgbe_rss_conf_ele *rss_filter_ptr;
3011 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3012 uint8_t first_mask = FALSE;
3014 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3015 if (!flow) {
3016 PMD_DRV_LOG(ERR, "failed to allocate memory");
3017 return (struct rte_flow *)flow;
3018 }
3019 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3020 sizeof(struct ixgbe_flow_mem), 0);
3021 if (!ixgbe_flow_mem_ptr) {
3022 PMD_DRV_LOG(ERR, "failed to allocate memory");
3023 rte_free(flow);
3024 return NULL;
3025 }
3026 ixgbe_flow_mem_ptr->flow = flow;
3027 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3028 ixgbe_flow_mem_ptr, entries);
3030 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3031 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3032 actions, &ntuple_filter, error);
3034 #ifdef RTE_LIBRTE_SECURITY
3035 /* ESP flow not really a flow*/
3036 if (ntuple_filter.proto == IPPROTO_ESP)
3041 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3043 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3044 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3045 if (!ntuple_filter_ptr) {
3046 PMD_DRV_LOG(ERR, "failed to allocate memory");
3047 goto out;
3048 }
3049 rte_memcpy(&ntuple_filter_ptr->filter_info,
3050 &ntuple_filter,
3051 sizeof(struct rte_eth_ntuple_filter));
3052 TAILQ_INSERT_TAIL(&filter_ntuple_list,
3053 ntuple_filter_ptr, entries);
3054 flow->rule = ntuple_filter_ptr;
3055 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3061 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3062 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3063 actions, &ethertype_filter, error);
3065 ret = ixgbe_add_del_ethertype_filter(dev,
3066 &ethertype_filter, TRUE);
3068 ethertype_filter_ptr = rte_zmalloc(
3069 "ixgbe_ethertype_filter",
3070 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3071 if (!ethertype_filter_ptr) {
3072 PMD_DRV_LOG(ERR, "failed to allocate memory");
3073 goto out;
3074 }
3075 rte_memcpy(&ethertype_filter_ptr->filter_info,
3076 &ethertype_filter,
3077 sizeof(struct rte_eth_ethertype_filter));
3078 TAILQ_INSERT_TAIL(&filter_ethertype_list,
3079 ethertype_filter_ptr, entries);
3080 flow->rule = ethertype_filter_ptr;
3081 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3087 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3088 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3089 actions, &syn_filter, error);
3091 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3093 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3094 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3095 if (!syn_filter_ptr) {
3096 PMD_DRV_LOG(ERR, "failed to allocate memory");
3097 goto out;
3098 }
3099 rte_memcpy(&syn_filter_ptr->filter_info,
3100 &syn_filter,
3101 sizeof(struct rte_eth_syn_filter));
3102 TAILQ_INSERT_TAIL(&filter_syn_list,
3103 syn_filter_ptr, entries);
3105 flow->rule = syn_filter_ptr;
3106 flow->filter_type = RTE_ETH_FILTER_SYN;
3112 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3113 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3114 actions, &fdir_rule, error);
3116 /* A mask cannot be deleted. */
3117 if (fdir_rule.b_mask) {
3118 if (!fdir_info->mask_added) {
3119 /* It's the first time the mask is set. */
3120 rte_memcpy(&fdir_info->mask,
3121 &fdir_rule.mask,
3122 sizeof(struct ixgbe_hw_fdir_mask));
3123 fdir_info->flex_bytes_offset =
3124 fdir_rule.flex_bytes_offset;
3126 if (fdir_rule.mask.flex_bytes_mask)
3127 ixgbe_fdir_set_flexbytes_offset(dev,
3128 fdir_rule.flex_bytes_offset);
3130 ret = ixgbe_fdir_set_input_mask(dev);
3134 fdir_info->mask_added = TRUE;
3135 first_mask = TRUE;
3136 } else {
3137 /*
3138 * Only one global mask is supported,
3139 * and all the masks should be the same.
3140 */
3141 ret = memcmp(&fdir_info->mask,
3142 &fdir_rule.mask,
3143 sizeof(struct ixgbe_hw_fdir_mask));
3147 if (fdir_info->flex_bytes_offset !=
3148 fdir_rule.flex_bytes_offset)
3149 goto out;
3150 }
3151 }
3153 if (fdir_rule.b_spec) {
3154 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3155 FALSE, FALSE);
3156 if (!ret) {
3157 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3158 sizeof(struct ixgbe_fdir_rule_ele), 0);
3159 if (!fdir_rule_ptr) {
3160 PMD_DRV_LOG(ERR, "failed to allocate memory");
3161 goto out;
3162 }
3163 rte_memcpy(&fdir_rule_ptr->filter_info,
3164 &fdir_rule,
3165 sizeof(struct ixgbe_fdir_rule));
3166 TAILQ_INSERT_TAIL(&filter_fdir_list,
3167 fdir_rule_ptr, entries);
3168 flow->rule = fdir_rule_ptr;
3169 flow->filter_type = RTE_ETH_FILTER_FDIR;
3175 /*
3176 * Clean the mask_added flag if we fail to
3177 * program the filter.
3178 */
3179 if (first_mask)
3180 fdir_info->mask_added = FALSE;
3181 goto out;
3188 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3189 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3190 actions, &l2_tn_filter, error);
3192 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3194 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3195 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3196 if (!l2_tn_filter_ptr) {
3197 PMD_DRV_LOG(ERR, "failed to allocate memory");
3198 goto out;
3199 }
3200 rte_memcpy(&l2_tn_filter_ptr->filter_info,
3201 &l2_tn_filter,
3202 sizeof(struct rte_eth_l2_tunnel_conf));
3203 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3204 l2_tn_filter_ptr, entries);
3205 flow->rule = l2_tn_filter_ptr;
3206 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3211 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3212 ret = ixgbe_parse_rss_filter(dev, attr,
3213 actions, &rss_conf, error);
3215 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3217 rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3218 sizeof(struct ixgbe_rss_conf_ele), 0);
3219 if (!rss_filter_ptr) {
3220 PMD_DRV_LOG(ERR, "failed to allocate memory");
3221 goto out;
3222 }
3223 ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3224 &rss_conf.conf);
3225 TAILQ_INSERT_TAIL(&filter_rss_list,
3226 rss_filter_ptr, entries);
3227 flow->rule = rss_filter_ptr;
3228 flow->filter_type = RTE_ETH_FILTER_HASH;
3233 out:
3234 TAILQ_REMOVE(&ixgbe_flow_list,
3235 ixgbe_flow_mem_ptr, entries);
3236 rte_flow_error_set(error, -ret,
3237 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3238 "Failed to create flow.");
3239 rte_free(ixgbe_flow_mem_ptr);
3240 return NULL;
3241 }
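/*
 * Illustrative sketch (not part of the driver): typical application usage
 * of the validate/create entry points through the generic rte_flow API.
 * port_id, attr, pattern and actions are assumed to be set up elsewhere;
 * error handling is abbreviated.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *f = NULL;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (f == NULL)
 *		printf("flow rejected: %s\n",
 *		       err.message ? err.message : "(no message)");
 */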
3244 /**
3245 * Check if the flow rule is supported by ixgbe.
3246 * It only checks the format; it doesn't guarantee that the rule can be
3247 * programmed into the HW, because there may not be enough room for it.
3248 */
3249 static int
3250 ixgbe_flow_validate(struct rte_eth_dev *dev,
3251 const struct rte_flow_attr *attr,
3252 const struct rte_flow_item pattern[],
3253 const struct rte_flow_action actions[],
3254 struct rte_flow_error *error)
3256 struct rte_eth_ntuple_filter ntuple_filter;
3257 struct rte_eth_ethertype_filter ethertype_filter;
3258 struct rte_eth_syn_filter syn_filter;
3259 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3260 struct ixgbe_fdir_rule fdir_rule;
3261 struct ixgbe_rte_flow_rss_conf rss_conf;
3264 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3265 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3266 actions, &ntuple_filter, error);
3270 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3271 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3272 actions, &ethertype_filter, error);
3276 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3277 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3278 actions, &syn_filter, error);
3282 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3283 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3284 actions, &fdir_rule, error);
3288 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3289 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3290 actions, &l2_tn_filter, error);
3294 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3295 ret = ixgbe_parse_rss_filter(dev, attr,
3296 actions, &rss_conf, error);
3301 /* Destroy a flow rule on ixgbe. */
3302 static int
3303 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3304 struct rte_flow *flow,
3305 struct rte_flow_error *error)
3308 struct rte_flow *pmd_flow = flow;
3309 enum rte_filter_type filter_type = pmd_flow->filter_type;
3310 struct rte_eth_ntuple_filter ntuple_filter;
3311 struct rte_eth_ethertype_filter ethertype_filter;
3312 struct rte_eth_syn_filter syn_filter;
3313 struct ixgbe_fdir_rule fdir_rule;
3314 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3315 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3316 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3317 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3318 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3319 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3320 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3321 struct ixgbe_hw_fdir_info *fdir_info =
3322 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3323 struct ixgbe_rss_conf_ele *rss_filter_ptr;
3325 switch (filter_type) {
3326 case RTE_ETH_FILTER_NTUPLE:
3327 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3328 pmd_flow->rule;
3329 rte_memcpy(&ntuple_filter,
3330 &ntuple_filter_ptr->filter_info,
3331 sizeof(struct rte_eth_ntuple_filter));
3332 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3334 TAILQ_REMOVE(&filter_ntuple_list,
3335 ntuple_filter_ptr, entries);
3336 rte_free(ntuple_filter_ptr);
3339 case RTE_ETH_FILTER_ETHERTYPE:
3340 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3341 pmd_flow->rule;
3342 rte_memcpy(&ethertype_filter,
3343 &ethertype_filter_ptr->filter_info,
3344 sizeof(struct rte_eth_ethertype_filter));
3345 ret = ixgbe_add_del_ethertype_filter(dev,
3346 &ethertype_filter, FALSE);
3348 TAILQ_REMOVE(&filter_ethertype_list,
3349 ethertype_filter_ptr, entries);
3350 rte_free(ethertype_filter_ptr);
3353 case RTE_ETH_FILTER_SYN:
3354 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3355 pmd_flow->rule;
3356 rte_memcpy(&syn_filter,
3357 &syn_filter_ptr->filter_info,
3358 sizeof(struct rte_eth_syn_filter));
3359 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3361 TAILQ_REMOVE(&filter_syn_list,
3362 syn_filter_ptr, entries);
3363 rte_free(syn_filter_ptr);
3366 case RTE_ETH_FILTER_FDIR:
3367 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3368 rte_memcpy(&fdir_rule,
3369 &fdir_rule_ptr->filter_info,
3370 sizeof(struct ixgbe_fdir_rule));
3371 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3373 TAILQ_REMOVE(&filter_fdir_list,
3374 fdir_rule_ptr, entries);
3375 rte_free(fdir_rule_ptr);
3376 if (TAILQ_EMPTY(&filter_fdir_list))
3377 fdir_info->mask_added = false;
3380 case RTE_ETH_FILTER_L2_TUNNEL:
3381 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3382 pmd_flow->rule;
3383 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3384 sizeof(struct rte_eth_l2_tunnel_conf));
3385 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3387 TAILQ_REMOVE(&filter_l2_tunnel_list,
3388 l2_tn_filter_ptr, entries);
3389 rte_free(l2_tn_filter_ptr);
3392 case RTE_ETH_FILTER_HASH:
3393 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3394 pmd_flow->rule;
3395 ret = ixgbe_config_rss_filter(dev,
3396 &rss_filter_ptr->filter_info, FALSE);
3398 TAILQ_REMOVE(&filter_rss_list,
3399 rss_filter_ptr, entries);
3400 rte_free(rss_filter_ptr);
3402 break;
3403 default:
3404 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3405 filter_type);
3406 ret = -EINVAL;
3407 break;
3408 }
3411 rte_flow_error_set(error, EINVAL,
3412 RTE_FLOW_ERROR_TYPE_HANDLE,
3413 NULL, "Failed to destroy flow");
3417 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3418 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3419 TAILQ_REMOVE(&ixgbe_flow_list,
3420 ixgbe_flow_mem_ptr, entries);
3421 rte_free(ixgbe_flow_mem_ptr);
3422 break;
3423 }
3424 }
3425 rte_free(flow);
3427 return ret;
3428 }
3429 /* Destroy all flow rules associated with a port on ixgbe. */
3430 static int
3431 ixgbe_flow_flush(struct rte_eth_dev *dev,
3432 struct rte_flow_error *error)
3436 ixgbe_clear_all_ntuple_filter(dev);
3437 ixgbe_clear_all_ethertype_filter(dev);
3438 ixgbe_clear_syn_filter(dev);
3440 ret = ixgbe_clear_all_fdir_filter(dev);
3442 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3443 NULL, "Failed to flush rule");
3447 ret = ixgbe_clear_all_l2_tn_filter(dev);
3449 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3450 NULL, "Failed to flush rule");
3454 ixgbe_clear_rss_filter(dev);
3456 ixgbe_filterlist_flush();
3458 return 0;
3459 }
3461 const struct rte_flow_ops ixgbe_flow_ops = {
3462 .validate = ixgbe_flow_validate,
3463 .create = ixgbe_flow_create,
3464 .destroy = ixgbe_flow_destroy,
3465 .flush = ixgbe_flow_flush,
3466 };
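/*
 * Note: applications do not use this table directly. The driver hands it
 * to the ethdev layer (via the filter_ctrl callback for
 * RTE_ETH_FILTER_GENERIC), so that rte_flow_validate(), rte_flow_create(),
 * rte_flow_destroy() and rte_flow_flush() on an ixgbe port dispatch to the
 * functions above.
 */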