1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
13 #include <rte_byteorder.h>
14 #include <rte_common.h>
15 #include <rte_cycles.h>
17 #include <rte_interrupts.h>
19 #include <rte_debug.h>
21 #include <rte_atomic.h>
22 #include <rte_branch_prediction.h>
23 #include <rte_memory.h>
25 #include <rte_alarm.h>
26 #include <rte_ether.h>
27 #include <ethdev_driver.h>
28 #include <rte_malloc.h>
29 #include <rte_random.h>
31 #include <rte_hash_crc.h>
33 #include <rte_flow_driver.h>
35 #include "ixgbe_logs.h"
36 #include "base/ixgbe_api.h"
37 #include "base/ixgbe_vf.h"
38 #include "base/ixgbe_common.h"
39 #include "base/ixgbe_osdep.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54 TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55 struct rte_eth_ntuple_filter filter_info;
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59 TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60 struct rte_eth_ethertype_filter filter_info;
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64 TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65 struct rte_eth_syn_filter filter_info;
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69 TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70 struct ixgbe_fdir_rule filter_info;
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74 TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75 struct ixgbe_l2_tunnel_conf filter_info;
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79 TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80 struct ixgbe_rte_flow_rss_conf filter_info;
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84 TAILQ_ENTRY(ixgbe_flow_mem) entries;
85 struct rte_flow *flow;
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
105 * An endless loop cannot happen under the assumptions below:
106 * 1. there is at least one non-void item (END)
107 * 2. cur is before END.
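 * e.g. with pattern = { VOID, ETH, VOID, IPV4, END }, successive calls
 * starting from cur = NULL return the ETH, IPV4 and END items in turn.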
110 const struct rte_flow_item *next_no_void_pattern(
111 const struct rte_flow_item pattern[],
112 const struct rte_flow_item *cur)
114 const struct rte_flow_item *next =
115 cur ? cur + 1 : &pattern[0];
117 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
124 const struct rte_flow_action *next_no_void_action(
125 const struct rte_flow_action actions[],
126 const struct rte_flow_action *cur)
128 const struct rte_flow_action *next =
129 cur ? cur + 1 : &actions[0];
131 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
138 * Please be aware that there is an assumption for all the parsers:
139 * rte_flow_item uses big endian, while rte_flow_attr and
140 * rte_flow_action use CPU order.
141 * Because the pattern is used to describe the packets,
142 * the packets normally use network order.
146 * Parse the rule to see if it is an n-tuple rule,
147 * and get the n-tuple filter info along the way.
149 * The first not void item can be ETH or IPV4.
150 * The second not void item must be IPV4 if the first one is ETH.
151 * The third not void item must be UDP or TCP.
152 * The next not void item must be END.
154 * The first not void action should be QUEUE.
155 * The next not void action should be END.
159 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
160 * dst_addr 192.167.3.50 0xFFFFFFFF
161 * next_proto_id 17 0xFF
162 * UDP/TCP/ src_port 80 0xFFFF
163 * SCTP dst_port 80 0xFFFF
165 * other members in mask and spec should be set to 0x00.
166 * item->last should be NULL.
168 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
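 *
 * Illustrative sketch (not part of the driver): one way an application
 * could build a rule that this parser accepts, using the public rte_flow
 * API. port_id, the queue index and the local variable names are
 * placeholder assumptions.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)),
 *		.next_proto_id = IPPROTO_UDP } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = RTE_BE32(UINT32_MAX),
 *		.dst_addr = RTE_BE32(UINT32_MAX),
 *		.next_proto_id = UINT8_MAX } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = RTE_BE16(UINT16_MAX),
 *		.dst_port = RTE_BE16(UINT16_MAX) } };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);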
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173 const struct rte_flow_item pattern[],
174 const struct rte_flow_action actions[],
175 struct rte_eth_ntuple_filter *filter,
176 struct rte_flow_error *error)
178 const struct rte_flow_item *item;
179 const struct rte_flow_action *act;
180 const struct rte_flow_item_ipv4 *ipv4_spec;
181 const struct rte_flow_item_ipv4 *ipv4_mask;
182 const struct rte_flow_item_tcp *tcp_spec;
183 const struct rte_flow_item_tcp *tcp_mask;
184 const struct rte_flow_item_udp *udp_spec;
185 const struct rte_flow_item_udp *udp_mask;
186 const struct rte_flow_item_sctp *sctp_spec;
187 const struct rte_flow_item_sctp *sctp_mask;
188 const struct rte_flow_item_eth *eth_spec;
189 const struct rte_flow_item_eth *eth_mask;
190 const struct rte_flow_item_vlan *vlan_spec;
191 const struct rte_flow_item_vlan *vlan_mask;
192 struct rte_flow_item_eth eth_null;
193 struct rte_flow_item_vlan vlan_null;
196 rte_flow_error_set(error,
197 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198 NULL, "NULL pattern.");
203 rte_flow_error_set(error, EINVAL,
204 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205 NULL, "NULL action.");
209 rte_flow_error_set(error, EINVAL,
210 RTE_FLOW_ERROR_TYPE_ATTR,
211 NULL, "NULL attribute.");
215 memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
216 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
218 #ifdef RTE_LIB_SECURITY
220 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
222 act = next_no_void_action(actions, NULL);
223 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224 const void *conf = act->conf;
225 /* check if the next not void item is END */
226 act = next_no_void_action(actions, act);
227 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229 rte_flow_error_set(error, EINVAL,
230 RTE_FLOW_ERROR_TYPE_ACTION,
231 act, "Not supported action.");
235 /* get the IP pattern*/
236 item = next_no_void_pattern(pattern, NULL);
237 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
240 item->type == RTE_FLOW_ITEM_TYPE_END) {
241 rte_flow_error_set(error, EINVAL,
242 RTE_FLOW_ERROR_TYPE_ITEM,
243 item, "IP pattern missing.");
246 item = next_no_void_pattern(pattern, item);
249 filter->proto = IPPROTO_ESP;
250 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
255 /* the first not void item can be MAC or IPv4 */
256 item = next_no_void_pattern(pattern, NULL);
258 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260 rte_flow_error_set(error, EINVAL,
261 RTE_FLOW_ERROR_TYPE_ITEM,
262 item, "Not supported by ntuple filter");
266 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267 eth_spec = item->spec;
268 eth_mask = item->mask;
269 /*Not supported last point for range*/
271 rte_flow_error_set(error,
273 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274 item, "Not supported last point for range");
278 /* if the first item is MAC, the content should be NULL */
279 if ((item->spec || item->mask) &&
280 (memcmp(eth_spec, ð_null,
281 sizeof(struct rte_flow_item_eth)) ||
282 memcmp(eth_mask, ð_null,
283 sizeof(struct rte_flow_item_eth)))) {
284 rte_flow_error_set(error, EINVAL,
285 RTE_FLOW_ERROR_TYPE_ITEM,
286 item, "Not supported by ntuple filter");
289 /* check if the next not void item is IPv4 or Vlan */
290 item = next_no_void_pattern(pattern, item);
291 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293 rte_flow_error_set(error,
294 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295 item, "Not supported by ntuple filter");
300 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301 vlan_spec = item->spec;
302 vlan_mask = item->mask;
303 /*Not supported last point for range*/
305 rte_flow_error_set(error,
307 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308 item, "Not supported last point for range");
311 /* the content should be NULL */
312 if ((item->spec || item->mask) &&
313 (memcmp(vlan_spec, &vlan_null,
314 sizeof(struct rte_flow_item_vlan)) ||
315 memcmp(vlan_mask, &vlan_null,
316 sizeof(struct rte_flow_item_vlan)))) {
318 rte_flow_error_set(error, EINVAL,
319 RTE_FLOW_ERROR_TYPE_ITEM,
320 item, "Not supported by ntuple filter");
323 /* check if the next not void item is IPv4 */
324 item = next_no_void_pattern(pattern, item);
325 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326 rte_flow_error_set(error,
327 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328 item, "Not supported by ntuple filter");
334 /* get the IPv4 info */
335 if (!item->spec || !item->mask) {
336 rte_flow_error_set(error, EINVAL,
337 RTE_FLOW_ERROR_TYPE_ITEM,
338 item, "Invalid ntuple mask");
341 /*Not supported last point for range*/
343 rte_flow_error_set(error, EINVAL,
344 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345 item, "Not supported last point for range");
349 ipv4_mask = item->mask;
351 * Only support src & dst addresses, protocol,
352 * others should be masked.
354 if (ipv4_mask->hdr.version_ihl ||
355 ipv4_mask->hdr.type_of_service ||
356 ipv4_mask->hdr.total_length ||
357 ipv4_mask->hdr.packet_id ||
358 ipv4_mask->hdr.fragment_offset ||
359 ipv4_mask->hdr.time_to_live ||
360 ipv4_mask->hdr.hdr_checksum) {
361 rte_flow_error_set(error,
362 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363 item, "Not supported by ntuple filter");
366 if ((ipv4_mask->hdr.src_addr != 0 &&
367 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
368 (ipv4_mask->hdr.dst_addr != 0 &&
369 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
370 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
371 ipv4_mask->hdr.next_proto_id != 0)) {
372 rte_flow_error_set(error,
373 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
374 item, "Not supported by ntuple filter");
378 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
379 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
380 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
382 ipv4_spec = item->spec;
383 filter->dst_ip = ipv4_spec->hdr.dst_addr;
384 filter->src_ip = ipv4_spec->hdr.src_addr;
385 filter->proto = ipv4_spec->hdr.next_proto_id;
388 /* check if the next not void item is TCP or UDP */
389 item = next_no_void_pattern(pattern, item);
390 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
391 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
392 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
393 item->type != RTE_FLOW_ITEM_TYPE_END) {
394 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
395 rte_flow_error_set(error, EINVAL,
396 RTE_FLOW_ERROR_TYPE_ITEM,
397 item, "Not supported by ntuple filter");
401 if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
402 (!item->spec && !item->mask)) {
406 /* get the TCP/UDP/SCTP info */
407 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
408 (!item->spec || !item->mask)) {
409 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410 rte_flow_error_set(error, EINVAL,
411 RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Invalid ntuple mask");
416 /*Not supported last point for range*/
418 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
419 rte_flow_error_set(error, EINVAL,
420 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
421 item, "Not supported last point for range");
426 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
427 tcp_mask = item->mask;
430 * Only support src & dst ports, tcp flags,
431 * others should be masked.
433 if (tcp_mask->hdr.sent_seq ||
434 tcp_mask->hdr.recv_ack ||
435 tcp_mask->hdr.data_off ||
436 tcp_mask->hdr.rx_win ||
437 tcp_mask->hdr.cksum ||
438 tcp_mask->hdr.tcp_urp) {
440 sizeof(struct rte_eth_ntuple_filter));
441 rte_flow_error_set(error, EINVAL,
442 RTE_FLOW_ERROR_TYPE_ITEM,
443 item, "Not supported by ntuple filter");
446 if ((tcp_mask->hdr.src_port != 0 &&
447 tcp_mask->hdr.src_port != UINT16_MAX) ||
448 (tcp_mask->hdr.dst_port != 0 &&
449 tcp_mask->hdr.dst_port != UINT16_MAX)) {
450 rte_flow_error_set(error,
451 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
452 item, "Not supported by ntuple filter");
456 filter->dst_port_mask = tcp_mask->hdr.dst_port;
457 filter->src_port_mask = tcp_mask->hdr.src_port;
458 if (tcp_mask->hdr.tcp_flags == 0xFF) {
459 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
460 } else if (!tcp_mask->hdr.tcp_flags) {
461 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
463 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
464 rte_flow_error_set(error, EINVAL,
465 RTE_FLOW_ERROR_TYPE_ITEM,
466 item, "Not supported by ntuple filter");
470 tcp_spec = item->spec;
471 filter->dst_port = tcp_spec->hdr.dst_port;
472 filter->src_port = tcp_spec->hdr.src_port;
473 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
474 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
475 udp_mask = item->mask;
478 * Only support src & dst ports,
479 * others should be masked.
481 if (udp_mask->hdr.dgram_len ||
482 udp_mask->hdr.dgram_cksum) {
484 sizeof(struct rte_eth_ntuple_filter));
485 rte_flow_error_set(error, EINVAL,
486 RTE_FLOW_ERROR_TYPE_ITEM,
487 item, "Not supported by ntuple filter");
490 if ((udp_mask->hdr.src_port != 0 &&
491 udp_mask->hdr.src_port != UINT16_MAX) ||
492 (udp_mask->hdr.dst_port != 0 &&
493 udp_mask->hdr.dst_port != UINT16_MAX)) {
494 rte_flow_error_set(error,
495 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
496 item, "Not supported by ntuple filter");
500 filter->dst_port_mask = udp_mask->hdr.dst_port;
501 filter->src_port_mask = udp_mask->hdr.src_port;
503 udp_spec = item->spec;
504 filter->dst_port = udp_spec->hdr.dst_port;
505 filter->src_port = udp_spec->hdr.src_port;
506 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
507 sctp_mask = item->mask;
510 * Only support src & dst ports,
511 * others should be masked.
513 if (sctp_mask->hdr.tag ||
514 sctp_mask->hdr.cksum) {
516 sizeof(struct rte_eth_ntuple_filter));
517 rte_flow_error_set(error, EINVAL,
518 RTE_FLOW_ERROR_TYPE_ITEM,
519 item, "Not supported by ntuple filter");
523 filter->dst_port_mask = sctp_mask->hdr.dst_port;
524 filter->src_port_mask = sctp_mask->hdr.src_port;
526 sctp_spec = item->spec;
527 filter->dst_port = sctp_spec->hdr.dst_port;
528 filter->src_port = sctp_spec->hdr.src_port;
533 /* check if the next not void item is END */
534 item = next_no_void_pattern(pattern, item);
535 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
536 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537 rte_flow_error_set(error, EINVAL,
538 RTE_FLOW_ERROR_TYPE_ITEM,
539 item, "Not supported by ntuple filter");
546 * n-tuple only supports forwarding,
547 * check if the first not void action is QUEUE.
549 act = next_no_void_action(actions, NULL);
550 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
551 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552 rte_flow_error_set(error, EINVAL,
553 RTE_FLOW_ERROR_TYPE_ACTION,
554 item, "Not supported action.");
558 ((const struct rte_flow_action_queue *)act->conf)->index;
560 /* check if the next not void item is END */
561 act = next_no_void_action(actions, act);
562 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
563 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
564 rte_flow_error_set(error, EINVAL,
565 RTE_FLOW_ERROR_TYPE_ACTION,
566 act, "Not supported action.");
571 /* must be input direction */
572 if (!attr->ingress) {
573 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
574 rte_flow_error_set(error, EINVAL,
575 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
576 attr, "Only support ingress.");
582 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
583 rte_flow_error_set(error, EINVAL,
584 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
585 attr, "Not support egress.");
590 if (attr->transfer) {
591 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
592 rte_flow_error_set(error, EINVAL,
593 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
594 attr, "No support for transfer.");
598 if (attr->priority > 0xFFFF) {
599 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
600 rte_flow_error_set(error, EINVAL,
601 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
602 attr, "Error priority.");
605 filter->priority = (uint16_t)attr->priority;
606 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
607 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
608 filter->priority = 1;
613 /* a specific function for ixgbe because its flags are specific */
615 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
616 const struct rte_flow_attr *attr,
617 const struct rte_flow_item pattern[],
618 const struct rte_flow_action actions[],
619 struct rte_eth_ntuple_filter *filter,
620 struct rte_flow_error *error)
623 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
627 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
632 #ifdef RTE_LIB_SECURITY
633 /* An ESP flow is not really a flow */
634 if (filter->proto == IPPROTO_ESP)
638 /* Ixgbe doesn't support tcp flags. */
639 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
640 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
641 rte_flow_error_set(error, EINVAL,
642 RTE_FLOW_ERROR_TYPE_ITEM,
643 NULL, "Not supported by ntuple filter");
647 /* Ixgbe doesn't support many priorities. */
648 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
649 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
650 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
651 rte_flow_error_set(error, EINVAL,
652 RTE_FLOW_ERROR_TYPE_ITEM,
653 NULL, "Priority not supported by ntuple filter");
657 if (filter->queue >= dev->data->nb_rx_queues)
660 /* fixed value for ixgbe */
661 filter->flags = RTE_5TUPLE_FLAGS;
666 * Parse the rule to see if it is an ethertype rule,
667 * and get the ethertype filter info along the way.
669 * The first not void item can be ETH.
670 * The next not void item must be END.
672 * The first not void action should be QUEUE.
673 * The next not void action should be END.
676 * ETH type 0x0807 0xFFFF
678 * other members in mask and spec should be set to 0x00.
679 * item->last should be NULL.
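 *
 * Illustrative sketch (not part of the driver): a pattern and action list
 * matching the example above; the queue index is a placeholder assumption.
 *
 *	struct rte_flow_item_eth eth_spec = { .type = RTE_BE16(0x0807) };
 *	struct rte_flow_item_eth eth_mask = { .type = RTE_BE16(0xFFFF) };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &eth_spec, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};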
682 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
683 const struct rte_flow_item *pattern,
684 const struct rte_flow_action *actions,
685 struct rte_eth_ethertype_filter *filter,
686 struct rte_flow_error *error)
688 const struct rte_flow_item *item;
689 const struct rte_flow_action *act;
690 const struct rte_flow_item_eth *eth_spec;
691 const struct rte_flow_item_eth *eth_mask;
692 const struct rte_flow_action_queue *act_q;
695 rte_flow_error_set(error, EINVAL,
696 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
697 NULL, "NULL pattern.");
702 rte_flow_error_set(error, EINVAL,
703 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
704 NULL, "NULL action.");
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ATTR,
711 NULL, "NULL attribute.");
715 item = next_no_void_pattern(pattern, NULL);
716 /* The first non-void item should be MAC. */
717 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
718 rte_flow_error_set(error, EINVAL,
719 RTE_FLOW_ERROR_TYPE_ITEM,
720 item, "Not supported by ethertype filter");
724 /*Not supported last point for range*/
726 rte_flow_error_set(error, EINVAL,
727 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
728 item, "Not supported last point for range");
732 /* Get the MAC info. */
733 if (!item->spec || !item->mask) {
734 rte_flow_error_set(error, EINVAL,
735 RTE_FLOW_ERROR_TYPE_ITEM,
736 item, "Not supported by ethertype filter");
740 eth_spec = item->spec;
741 eth_mask = item->mask;
743 /* Mask bits of source MAC address must be full of 0.
744 * Mask bits of destination MAC address must be full of 1 or full of 0.
747 if (!rte_is_zero_ether_addr(ð_mask->src) ||
748 (!rte_is_zero_ether_addr(ð_mask->dst) &&
749 !rte_is_broadcast_ether_addr(ð_mask->dst))) {
750 rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ITEM,
752 item, "Invalid ether address mask");
756 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
757 rte_flow_error_set(error, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ITEM,
759 item, "Invalid ethertype mask");
763 /* If mask bits of destination MAC address
764 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
766 if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
767 filter->mac_addr = eth_spec->dst;
768 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
770 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
772 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
774 /* Check if the next non-void item is END. */
775 item = next_no_void_pattern(pattern, item);
776 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
777 rte_flow_error_set(error, EINVAL,
778 RTE_FLOW_ERROR_TYPE_ITEM,
779 item, "Not supported by ethertype filter.");
785 act = next_no_void_action(actions, NULL);
786 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
787 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
788 rte_flow_error_set(error, EINVAL,
789 RTE_FLOW_ERROR_TYPE_ACTION,
790 act, "Not supported action.");
794 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
795 act_q = (const struct rte_flow_action_queue *)act->conf;
796 filter->queue = act_q->index;
798 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
801 /* Check if the next non-void item is END */
802 act = next_no_void_action(actions, act);
803 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
804 rte_flow_error_set(error, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ACTION,
806 act, "Not supported action.");
811 /* Must be input direction */
812 if (!attr->ingress) {
813 rte_flow_error_set(error, EINVAL,
814 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
815 attr, "Only support ingress.");
821 rte_flow_error_set(error, EINVAL,
822 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
823 attr, "Not support egress.");
828 if (attr->transfer) {
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
831 attr, "No support for transfer.");
836 if (attr->priority) {
837 rte_flow_error_set(error, EINVAL,
838 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
839 attr, "Not support priority.");
845 rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
847 attr, "Not support group.");
855 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
856 const struct rte_flow_attr *attr,
857 const struct rte_flow_item pattern[],
858 const struct rte_flow_action actions[],
859 struct rte_eth_ethertype_filter *filter,
860 struct rte_flow_error *error)
863 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
865 MAC_TYPE_FILTER_SUP(hw->mac.type);
867 ret = cons_parse_ethertype_filter(attr, pattern,
868 actions, filter, error);
873 if (filter->queue >= dev->data->nb_rx_queues) {
874 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
875 rte_flow_error_set(error, EINVAL,
876 RTE_FLOW_ERROR_TYPE_ITEM,
877 NULL, "queue index much too big");
881 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
882 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
883 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
884 rte_flow_error_set(error, EINVAL,
885 RTE_FLOW_ERROR_TYPE_ITEM,
886 NULL, "IPv4/IPv6 not supported by ethertype filter");
890 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
891 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
892 rte_flow_error_set(error, EINVAL,
893 RTE_FLOW_ERROR_TYPE_ITEM,
894 NULL, "mac compare is unsupported");
898 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
899 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
900 rte_flow_error_set(error, EINVAL,
901 RTE_FLOW_ERROR_TYPE_ITEM,
902 NULL, "drop option is unsupported");
910 * Parse the rule to see if it is a TCP SYN rule,
911 * and get the TCP SYN filter info along the way.
913 * The first not void item must be ETH.
914 * The second not void item must be IPV4 or IPV6.
915 * The third not void item must be TCP.
916 * The next not void item must be END.
918 * The first not void action should be QUEUE.
919 * The next not void action should be END.
923 * IPV4/IPV6 NULL NULL
924 * TCP tcp_flags 0x02 0xFF
926 * other members in mask and spec should be set to 0x00.
927 * item->last should be NULL.
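 *
 * Illustrative sketch (not part of the driver): note that the parser below
 * requires the tcp_flags mask to be exactly RTE_TCP_SYN_FLAG; the queue
 * index is a placeholder assumption.
 *
 *	struct rte_flow_item_tcp tcp_spec = { .hdr = {
 *		.tcp_flags = RTE_TCP_SYN_FLAG } };
 *	struct rte_flow_item_tcp tcp_mask = { .hdr = {
 *		.tcp_flags = RTE_TCP_SYN_FLAG } };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
 *		  .spec = &tcp_spec, .mask = &tcp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};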
930 cons_parse_syn_filter(const struct rte_flow_attr *attr,
931 const struct rte_flow_item pattern[],
932 const struct rte_flow_action actions[],
933 struct rte_eth_syn_filter *filter,
934 struct rte_flow_error *error)
936 const struct rte_flow_item *item;
937 const struct rte_flow_action *act;
938 const struct rte_flow_item_tcp *tcp_spec;
939 const struct rte_flow_item_tcp *tcp_mask;
940 const struct rte_flow_action_queue *act_q;
943 rte_flow_error_set(error, EINVAL,
944 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
945 NULL, "NULL pattern.");
950 rte_flow_error_set(error, EINVAL,
951 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
952 NULL, "NULL action.");
957 rte_flow_error_set(error, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ATTR,
959 NULL, "NULL attribute.");
964 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
965 item = next_no_void_pattern(pattern, NULL);
966 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
967 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
968 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
969 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
970 rte_flow_error_set(error, EINVAL,
971 RTE_FLOW_ERROR_TYPE_ITEM,
972 item, "Not supported by syn filter");
975 /*Not supported last point for range*/
977 rte_flow_error_set(error, EINVAL,
978 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
979 item, "Not supported last point for range");
984 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
985 /* if the item is MAC, the content should be NULL */
986 if (item->spec || item->mask) {
987 rte_flow_error_set(error, EINVAL,
988 RTE_FLOW_ERROR_TYPE_ITEM,
989 item, "Invalid SYN address mask");
993 /* check if the next not void item is IPv4 or IPv6 */
994 item = next_no_void_pattern(pattern, item);
995 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
996 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
997 rte_flow_error_set(error, EINVAL,
998 RTE_FLOW_ERROR_TYPE_ITEM,
999 item, "Not supported by syn filter");
1005 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1006 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1007 /* if the item is IP, the content should be NULL */
1008 if (item->spec || item->mask) {
1009 rte_flow_error_set(error, EINVAL,
1010 RTE_FLOW_ERROR_TYPE_ITEM,
1011 item, "Invalid SYN mask");
1015 /* check if the next not void item is TCP */
1016 item = next_no_void_pattern(pattern, item);
1017 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
1018 rte_flow_error_set(error, EINVAL,
1019 RTE_FLOW_ERROR_TYPE_ITEM,
1020 item, "Not supported by syn filter");
1025 /* Get the TCP info. Only support SYN. */
1026 if (!item->spec || !item->mask) {
1027 rte_flow_error_set(error, EINVAL,
1028 RTE_FLOW_ERROR_TYPE_ITEM,
1029 item, "Invalid SYN mask");
1032 /*Not supported last point for range*/
1034 rte_flow_error_set(error, EINVAL,
1035 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1036 item, "Not supported last point for range");
1040 tcp_spec = item->spec;
1041 tcp_mask = item->mask;
1042 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1043 tcp_mask->hdr.src_port ||
1044 tcp_mask->hdr.dst_port ||
1045 tcp_mask->hdr.sent_seq ||
1046 tcp_mask->hdr.recv_ack ||
1047 tcp_mask->hdr.data_off ||
1048 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1049 tcp_mask->hdr.rx_win ||
1050 tcp_mask->hdr.cksum ||
1051 tcp_mask->hdr.tcp_urp) {
1052 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1053 rte_flow_error_set(error, EINVAL,
1054 RTE_FLOW_ERROR_TYPE_ITEM,
1055 item, "Not supported by syn filter");
1059 /* check if the next not void item is END */
1060 item = next_no_void_pattern(pattern, item);
1061 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1062 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1063 rte_flow_error_set(error, EINVAL,
1064 RTE_FLOW_ERROR_TYPE_ITEM,
1065 item, "Not supported by syn filter");
1069 /* check if the first not void action is QUEUE. */
1070 act = next_no_void_action(actions, NULL);
1071 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1072 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1073 rte_flow_error_set(error, EINVAL,
1074 RTE_FLOW_ERROR_TYPE_ACTION,
1075 act, "Not supported action.");
1079 act_q = (const struct rte_flow_action_queue *)act->conf;
1080 filter->queue = act_q->index;
1081 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1082 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1083 rte_flow_error_set(error, EINVAL,
1084 RTE_FLOW_ERROR_TYPE_ACTION,
1085 act, "Not supported action.");
1089 /* check if the next not void item is END */
1090 act = next_no_void_action(actions, act);
1091 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1092 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1093 rte_flow_error_set(error, EINVAL,
1094 RTE_FLOW_ERROR_TYPE_ACTION,
1095 act, "Not supported action.");
1100 /* must be input direction */
1101 if (!attr->ingress) {
1102 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1103 rte_flow_error_set(error, EINVAL,
1104 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1105 attr, "Only support ingress.");
1111 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1112 rte_flow_error_set(error, EINVAL,
1113 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1114 attr, "Not support egress.");
1119 if (attr->transfer) {
1120 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1121 rte_flow_error_set(error, EINVAL,
1122 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1123 attr, "No support for transfer.");
1127 /* Support 2 priorities, the lowest or highest. */
1128 if (!attr->priority) {
1129 filter->hig_pri = 0;
1130 } else if (attr->priority == (uint32_t)~0U) {
1131 filter->hig_pri = 1;
1133 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1134 rte_flow_error_set(error, EINVAL,
1135 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1136 attr, "Not support priority.");
1144 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1145 const struct rte_flow_attr *attr,
1146 const struct rte_flow_item pattern[],
1147 const struct rte_flow_action actions[],
1148 struct rte_eth_syn_filter *filter,
1149 struct rte_flow_error *error)
1152 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1154 MAC_TYPE_FILTER_SUP(hw->mac.type);
1156 ret = cons_parse_syn_filter(attr, pattern,
1157 actions, filter, error);
1159 if (filter->queue >= dev->data->nb_rx_queues)
1169 * Parse the rule to see if it is an L2 tunnel rule,
1170 * and get the L2 tunnel filter info along the way.
1171 * Only E-tag is supported now.
1173 * The first not void item can be E_TAG.
1174 * The next not void item must be END.
1176 * The first not void action should be VF or PF.
1177 * The next not void action should be END.
1181 e_cid_base 0x309 0xFFF
1183 * other members in mask and spec should be set to 0x00.
1184 * item->last should be NULL.
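 *
 * Illustrative sketch (not part of the driver): an E-tag rule directing
 * traffic to a VF; the VF id is a placeholder assumption. GRP sits in the
 * two bits above the 12-bit E-CID base inside rsvd_grp_ecid_b.
 *
 *	struct rte_flow_item_e_tag etag_spec = {
 *		.rsvd_grp_ecid_b = RTE_BE16((0x1 << 12) | 0x309) };
 *	struct rte_flow_item_e_tag etag_mask = {
 *		.rsvd_grp_ecid_b = RTE_BE16(0x3FFF) };
 *	struct rte_flow_action_vf vf = { .id = 2 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
 *		  .spec = &etag_spec, .mask = &etag_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};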
1187 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1188 const struct rte_flow_attr *attr,
1189 const struct rte_flow_item pattern[],
1190 const struct rte_flow_action actions[],
1191 struct ixgbe_l2_tunnel_conf *filter,
1192 struct rte_flow_error *error)
1194 const struct rte_flow_item *item;
1195 const struct rte_flow_item_e_tag *e_tag_spec;
1196 const struct rte_flow_item_e_tag *e_tag_mask;
1197 const struct rte_flow_action *act;
1198 const struct rte_flow_action_vf *act_vf;
1199 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1202 rte_flow_error_set(error, EINVAL,
1203 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1204 NULL, "NULL pattern.");
1209 rte_flow_error_set(error, EINVAL,
1210 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1211 NULL, "NULL action.");
1216 rte_flow_error_set(error, EINVAL,
1217 RTE_FLOW_ERROR_TYPE_ATTR,
1218 NULL, "NULL attribute.");
1222 /* The first not void item should be e-tag. */
1223 item = next_no_void_pattern(pattern, NULL);
1224 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1225 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1226 rte_flow_error_set(error, EINVAL,
1227 RTE_FLOW_ERROR_TYPE_ITEM,
1228 item, "Not supported by L2 tunnel filter");
1232 if (!item->spec || !item->mask) {
1233 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1234 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1235 item, "Not supported by L2 tunnel filter");
1239 /*Not supported last point for range*/
1241 rte_flow_error_set(error, EINVAL,
1242 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1243 item, "Not supported last point for range");
1247 e_tag_spec = item->spec;
1248 e_tag_mask = item->mask;
1250 /* Only care about GRP and E cid base. */
1251 if (e_tag_mask->epcp_edei_in_ecid_b ||
1252 e_tag_mask->in_ecid_e ||
1253 e_tag_mask->ecid_e ||
1254 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1255 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1256 rte_flow_error_set(error, EINVAL,
1257 RTE_FLOW_ERROR_TYPE_ITEM,
1258 item, "Not supported by L2 tunnel filter");
1262 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1264 * grp and e_cid_base are bit fields and only use 14 bits.
1265 * e-tag id is taken as little endian by HW.
1267 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
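	/* e.g. grp 0x1 with e_cid_base 0x309 yields tunnel_id 0x1309 */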
1269 /* check if the next not void item is END */
1270 item = next_no_void_pattern(pattern, item);
1271 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1272 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1273 rte_flow_error_set(error, EINVAL,
1274 RTE_FLOW_ERROR_TYPE_ITEM,
1275 item, "Not supported by L2 tunnel filter");
1280 /* must be input direction */
1281 if (!attr->ingress) {
1282 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1283 rte_flow_error_set(error, EINVAL,
1284 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1285 attr, "Only support ingress.");
1291 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1292 rte_flow_error_set(error, EINVAL,
1293 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1294 attr, "Not support egress.");
1299 if (attr->transfer) {
1300 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1301 rte_flow_error_set(error, EINVAL,
1302 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1303 attr, "No support for transfer.");
1308 if (attr->priority) {
1309 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1310 rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1312 attr, "Not support priority.");
1316 /* check if the first not void action is VF or PF. */
1317 act = next_no_void_action(actions, NULL);
1318 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1319 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1320 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1321 rte_flow_error_set(error, EINVAL,
1322 RTE_FLOW_ERROR_TYPE_ACTION,
1323 act, "Not supported action.");
1327 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1328 act_vf = (const struct rte_flow_action_vf *)act->conf;
1329 filter->pool = act_vf->id;
1331 filter->pool = pci_dev->max_vfs;
1334 /* check if the next not void item is END */
1335 act = next_no_void_action(actions, act);
1336 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1337 memset(filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1338 rte_flow_error_set(error, EINVAL,
1339 RTE_FLOW_ERROR_TYPE_ACTION,
1340 act, "Not supported action.");
1348 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1349 const struct rte_flow_attr *attr,
1350 const struct rte_flow_item pattern[],
1351 const struct rte_flow_action actions[],
1352 struct ixgbe_l2_tunnel_conf *l2_tn_filter,
1353 struct rte_flow_error *error)
1356 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1357 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1360 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1361 actions, l2_tn_filter, error);
1363 if (hw->mac.type != ixgbe_mac_X550 &&
1364 hw->mac.type != ixgbe_mac_X550EM_x &&
1365 hw->mac.type != ixgbe_mac_X550EM_a) {
1366 memset(l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
1367 rte_flow_error_set(error, EINVAL,
1368 RTE_FLOW_ERROR_TYPE_ITEM,
1369 NULL, "Not supported by L2 tunnel filter");
1373 vf_num = pci_dev->max_vfs;
1375 if (l2_tn_filter->pool > vf_num)
1381 /* Parse to get the attr and action info of a flow director rule. */
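/* Illustrative sketch (not part of the driver): a QUEUE plus optional MARK
 * action list accepted by this parser; the queue index and mark id are
 * placeholder assumptions.
 *
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action_mark mark = { .id = 0x1234 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */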
1383 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1384 const struct rte_flow_action actions[],
1385 struct ixgbe_fdir_rule *rule,
1386 struct rte_flow_error *error)
1388 const struct rte_flow_action *act;
1389 const struct rte_flow_action_queue *act_q;
1390 const struct rte_flow_action_mark *mark;
1393 /* must be input direction */
1394 if (!attr->ingress) {
1395 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1396 rte_flow_error_set(error, EINVAL,
1397 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1398 attr, "Only support ingress.");
1404 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1407 attr, "Not support egress.");
1412 if (attr->transfer) {
1413 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414 rte_flow_error_set(error, EINVAL,
1415 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1416 attr, "No support for transfer.");
1421 if (attr->priority) {
1422 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1423 rte_flow_error_set(error, EINVAL,
1424 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1425 attr, "Not support priority.");
1429 /* check if the first not void action is QUEUE or DROP. */
1430 act = next_no_void_action(actions, NULL);
1431 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1432 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1433 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1434 rte_flow_error_set(error, EINVAL,
1435 RTE_FLOW_ERROR_TYPE_ACTION,
1436 act, "Not supported action.");
1440 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1441 act_q = (const struct rte_flow_action_queue *)act->conf;
1442 rule->queue = act_q->index;
1444 /* signature mode does not support drop action. */
1445 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1446 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1447 rte_flow_error_set(error, EINVAL,
1448 RTE_FLOW_ERROR_TYPE_ACTION,
1449 act, "Not supported action.");
1452 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1455 /* check if the next not void item is MARK */
1456 act = next_no_void_action(actions, act);
1457 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1458 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1459 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1460 rte_flow_error_set(error, EINVAL,
1461 RTE_FLOW_ERROR_TYPE_ACTION,
1462 act, "Not supported action.");
1468 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1469 mark = (const struct rte_flow_action_mark *)act->conf;
1470 rule->soft_id = mark->id;
1471 act = next_no_void_action(actions, act);
1474 /* check if the next not void item is END */
1475 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1476 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1477 rte_flow_error_set(error, EINVAL,
1478 RTE_FLOW_ERROR_TYPE_ACTION,
1479 act, "Not supported action.");
1486 /* search the next non-void pattern and skip fuzzy items */
1488 const struct rte_flow_item *next_no_fuzzy_pattern(
1489 const struct rte_flow_item pattern[],
1490 const struct rte_flow_item *cur)
1492 const struct rte_flow_item *next =
1493 next_no_void_pattern(pattern, cur);
1495 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1497 next = next_no_void_pattern(pattern, next);
1501 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1503 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1504 const struct rte_flow_item *item;
1505 uint32_t sh, lh, mh;
1510 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1513 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1545 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1546 * and get the flow director filter info along the way.
1547 * UDP/TCP/SCTP PATTERN:
1548 * The first not void item can be ETH or IPV4 or IPV6
1549 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1550 * The next not void item could be UDP or TCP or SCTP (optional)
1551 * The next not void item could be RAW (for flexbyte, optional)
1552 * The next not void item must be END.
1553 * A Fuzzy Match pattern can appear at any place before END.
1554 * Fuzzy Match is optional for IPV4 but is required for IPV6
1556 * The first not void item must be ETH.
1557 * The second not void item must be MAC VLAN.
1558 * The next not void item must be END.
1560 * The first not void action should be QUEUE or DROP.
1561 * The second not void optional action should be MARK,
1562 * mark_id is a uint32_t number.
1563 * The next not void action should be END.
1564 * UDP/TCP/SCTP pattern example:
1567 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1568 * dst_addr 192.167.3.50 0xFFFFFFFF
1569 * UDP/TCP/SCTP src_port 80 0xFFFF
1570 * dst_port 80 0xFFFF
1571 * FLEX relative 0 0x1
1574 * offset 12 0xFFFFFFFF
1577 * pattern[0] 0x86 0xFF
1578 * pattern[1] 0xDD 0xFF
1580 * MAC VLAN pattern example:
1583 {0xAC, 0x7B, 0xA1, {0xFF, 0xFF, 0xFF,
1584 0x2C, 0x6D, 0x36} 0xFF, 0xFF, 0xFF}
1585 * MAC VLAN tci 0x2016 0xEFFF
1587 * Other members in mask and spec should be set to 0x00.
1588 * Item->last should be NULL.
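 *
 * Illustrative sketch (not part of the driver): an IPv4/UDP perfect-mode
 * pattern with a 2-byte flex match at offset 12, mirroring the example
 * values above. Variable names are placeholder assumptions.
 *
 *	struct rte_flow_item_ipv4 ip_spec = { .hdr = {
 *		.src_addr = RTE_BE32(RTE_IPV4(192, 168, 1, 20)),
 *		.dst_addr = RTE_BE32(RTE_IPV4(192, 167, 3, 50)) } };
 *	struct rte_flow_item_ipv4 ip_mask = { .hdr = {
 *		.src_addr = RTE_BE32(UINT32_MAX),
 *		.dst_addr = RTE_BE32(UINT32_MAX) } };
 *	struct rte_flow_item_udp udp_spec = { .hdr = {
 *		.src_port = RTE_BE16(80), .dst_port = RTE_BE16(80) } };
 *	struct rte_flow_item_udp udp_mask = { .hdr = {
 *		.src_port = RTE_BE16(UINT16_MAX),
 *		.dst_port = RTE_BE16(UINT16_MAX) } };
 *	uint8_t flex_bytes[2] = { 0x86, 0xDD };
 *	uint8_t flex_bytes_mask[2] = { 0xFF, 0xFF };
 *	struct rte_flow_item_raw raw_spec = { .offset = 12,
 *		.length = 2, .pattern = flex_bytes };
 *	struct rte_flow_item_raw raw_mask = { .relative = 1, .search = 1,
 *		.offset = -1, .limit = UINT16_MAX, .length = UINT16_MAX,
 *		.pattern = flex_bytes_mask };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
 *		  .spec = &udp_spec, .mask = &udp_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_RAW,
 *		  .spec = &raw_spec, .mask = &raw_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};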
1591 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1592 const struct rte_flow_attr *attr,
1593 const struct rte_flow_item pattern[],
1594 const struct rte_flow_action actions[],
1595 struct ixgbe_fdir_rule *rule,
1596 struct rte_flow_error *error)
1598 const struct rte_flow_item *item;
1599 const struct rte_flow_item_eth *eth_spec;
1600 const struct rte_flow_item_eth *eth_mask;
1601 const struct rte_flow_item_ipv4 *ipv4_spec;
1602 const struct rte_flow_item_ipv4 *ipv4_mask;
1603 const struct rte_flow_item_ipv6 *ipv6_spec;
1604 const struct rte_flow_item_ipv6 *ipv6_mask;
1605 const struct rte_flow_item_tcp *tcp_spec;
1606 const struct rte_flow_item_tcp *tcp_mask;
1607 const struct rte_flow_item_udp *udp_spec;
1608 const struct rte_flow_item_udp *udp_mask;
1609 const struct rte_flow_item_sctp *sctp_spec;
1610 const struct rte_flow_item_sctp *sctp_mask;
1611 const struct rte_flow_item_vlan *vlan_spec;
1612 const struct rte_flow_item_vlan *vlan_mask;
1613 const struct rte_flow_item_raw *raw_mask;
1614 const struct rte_flow_item_raw *raw_spec;
1617 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1620 rte_flow_error_set(error, EINVAL,
1621 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1622 NULL, "NULL pattern.");
1627 rte_flow_error_set(error, EINVAL,
1628 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1629 NULL, "NULL action.");
1634 rte_flow_error_set(error, EINVAL,
1635 RTE_FLOW_ERROR_TYPE_ATTR,
1636 NULL, "NULL attribute.");
1641 * Some fields may not be provided. Set spec to 0 and mask to default
1642 * value. So, we need not do anything for the not provided fields later.
1644 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1645 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1646 rule->mask.vlan_tci_mask = 0;
1647 rule->mask.flex_bytes_mask = 0;
1650 * The first not void item should be
1651 * MAC or IPv4 or TCP or UDP or SCTP.
1653 item = next_no_fuzzy_pattern(pattern, NULL);
1654 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1655 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1656 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1657 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1658 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1659 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1660 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1661 rte_flow_error_set(error, EINVAL,
1662 RTE_FLOW_ERROR_TYPE_ITEM,
1663 item, "Not supported by fdir filter");
1667 if (signature_match(pattern))
1668 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1670 rule->mode = RTE_FDIR_MODE_PERFECT;
1672 /*Not supported last point for range*/
1674 rte_flow_error_set(error, EINVAL,
1675 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1676 item, "Not supported last point for range");
1680 /* Get the MAC info. */
1681 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1683 * Only support vlan and dst MAC address,
1684 * others should be masked.
1686 if (item->spec && !item->mask) {
1687 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1688 rte_flow_error_set(error, EINVAL,
1689 RTE_FLOW_ERROR_TYPE_ITEM,
1690 item, "Not supported by fdir filter");
1695 rule->b_spec = TRUE;
1696 eth_spec = item->spec;
1698 /* Get the dst MAC. */
1699 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1700 rule->ixgbe_fdir.formatted.inner_mac[j] =
1701 eth_spec->dst.addr_bytes[j];
1708 rule->b_mask = TRUE;
1709 eth_mask = item->mask;
1711 /* Ether type should be masked. */
1712 if (eth_mask->type ||
1713 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1714 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1715 rte_flow_error_set(error, EINVAL,
1716 RTE_FLOW_ERROR_TYPE_ITEM,
1717 item, "Not supported by fdir filter");
1721 /* If the ethernet item carries content, it means MAC VLAN mode. */
1722 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1725 * src MAC address must be masked out,
1726 * and a partial dst MAC address mask is not supported.
1728 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1729 if (eth_mask->src.addr_bytes[j] ||
1730 eth_mask->dst.addr_bytes[j] != 0xFF) {
1732 sizeof(struct ixgbe_fdir_rule));
1733 rte_flow_error_set(error, EINVAL,
1734 RTE_FLOW_ERROR_TYPE_ITEM,
1735 item, "Not supported by fdir filter");
1740 /* When there is no VLAN, it is treated as a full mask. */
1741 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1743 /*** If both spec and mask are NULL,
1744 * it means we don't care about ETH.
1749 * Check if the next not void item is vlan or ipv4.
1750 * IPv6 is not supported.
1752 item = next_no_fuzzy_pattern(pattern, item);
1753 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1754 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1755 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1756 rte_flow_error_set(error, EINVAL,
1757 RTE_FLOW_ERROR_TYPE_ITEM,
1758 item, "Not supported by fdir filter");
1762 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1763 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1764 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765 rte_flow_error_set(error, EINVAL,
1766 RTE_FLOW_ERROR_TYPE_ITEM,
1767 item, "Not supported by fdir filter");
1773 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1774 if (!(item->spec && item->mask)) {
1775 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1776 rte_flow_error_set(error, EINVAL,
1777 RTE_FLOW_ERROR_TYPE_ITEM,
1778 item, "Not supported by fdir filter");
1782 /*Not supported last point for range*/
1784 rte_flow_error_set(error, EINVAL,
1785 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1786 item, "Not supported last point for range");
1790 vlan_spec = item->spec;
1791 vlan_mask = item->mask;
1793 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1795 rule->mask.vlan_tci_mask = vlan_mask->tci;
1796 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1797 /* More than one tag is not supported. */
1799 /* Next not void item must be END */
1800 item = next_no_fuzzy_pattern(pattern, item);
1801 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1802 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1803 rte_flow_error_set(error, EINVAL,
1804 RTE_FLOW_ERROR_TYPE_ITEM,
1805 item, "Not supported by fdir filter");
1810 /* Get the IPV4 info. */
1811 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1813 * Set the flow type even if there's no content
1814 * as we must have a flow type.
1816 rule->ixgbe_fdir.formatted.flow_type =
1817 IXGBE_ATR_FLOW_TYPE_IPV4;
1818 /*Not supported last point for range*/
1820 rte_flow_error_set(error, EINVAL,
1821 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1822 item, "Not supported last point for range");
1826 * Only care about src & dst addresses,
1827 * others should be masked.
1830 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1831 rte_flow_error_set(error, EINVAL,
1832 RTE_FLOW_ERROR_TYPE_ITEM,
1833 item, "Not supported by fdir filter");
1836 rule->b_mask = TRUE;
1837 ipv4_mask = item->mask;
1838 if (ipv4_mask->hdr.version_ihl ||
1839 ipv4_mask->hdr.type_of_service ||
1840 ipv4_mask->hdr.total_length ||
1841 ipv4_mask->hdr.packet_id ||
1842 ipv4_mask->hdr.fragment_offset ||
1843 ipv4_mask->hdr.time_to_live ||
1844 ipv4_mask->hdr.next_proto_id ||
1845 ipv4_mask->hdr.hdr_checksum) {
1846 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1847 rte_flow_error_set(error, EINVAL,
1848 RTE_FLOW_ERROR_TYPE_ITEM,
1849 item, "Not supported by fdir filter");
1852 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1853 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1856 rule->b_spec = TRUE;
1857 ipv4_spec = item->spec;
1858 rule->ixgbe_fdir.formatted.dst_ip[0] =
1859 ipv4_spec->hdr.dst_addr;
1860 rule->ixgbe_fdir.formatted.src_ip[0] =
1861 ipv4_spec->hdr.src_addr;
1865 * Check if the next not void item is
1866 * TCP or UDP or SCTP or END.
1868 item = next_no_fuzzy_pattern(pattern, item);
1869 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1870 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1871 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1872 item->type != RTE_FLOW_ITEM_TYPE_END &&
1873 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1874 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1875 rte_flow_error_set(error, EINVAL,
1876 RTE_FLOW_ERROR_TYPE_ITEM,
1877 item, "Not supported by fdir filter");
1882 /* Get the IPV6 info. */
1883 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1885 * Set the flow type even if there's no content
1886 * as we must have a flow type.
1888 rule->ixgbe_fdir.formatted.flow_type =
1889 IXGBE_ATR_FLOW_TYPE_IPV6;
1892 * 1. must be a signature match
1893 * 2. "last" is not supported
1894 * 3. mask must not be NULL
1896 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1899 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1900 rte_flow_error_set(error, EINVAL,
1901 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1902 item, "Not supported last point for range");
1906 rule->b_mask = TRUE;
1907 ipv6_mask = item->mask;
1908 if (ipv6_mask->hdr.vtc_flow ||
1909 ipv6_mask->hdr.payload_len ||
1910 ipv6_mask->hdr.proto ||
1911 ipv6_mask->hdr.hop_limits) {
1912 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1913 rte_flow_error_set(error, EINVAL,
1914 RTE_FLOW_ERROR_TYPE_ITEM,
1915 item, "Not supported by fdir filter");
1919 /* check src addr mask */
1920 for (j = 0; j < 16; j++) {
1921 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1922 rule->mask.src_ipv6_mask |= 1 << j;
1923 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1924 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1925 rte_flow_error_set(error, EINVAL,
1926 RTE_FLOW_ERROR_TYPE_ITEM,
1927 item, "Not supported by fdir filter");
1932 /* check dst addr mask */
1933 for (j = 0; j < 16; j++) {
1934 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1935 rule->mask.dst_ipv6_mask |= 1 << j;
1936 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1937 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1938 rte_flow_error_set(error, EINVAL,
1939 RTE_FLOW_ERROR_TYPE_ITEM,
1940 item, "Not supported by fdir filter");
1946 rule->b_spec = TRUE;
1947 ipv6_spec = item->spec;
1948 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1949 ipv6_spec->hdr.src_addr, 16);
1950 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1951 ipv6_spec->hdr.dst_addr, 16);
1955 * Check if the next not void item is
1956 * TCP or UDP or SCTP or END.
1958 item = next_no_fuzzy_pattern(pattern, item);
1959 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1960 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1961 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1962 item->type != RTE_FLOW_ITEM_TYPE_END &&
1963 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1964 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1965 rte_flow_error_set(error, EINVAL,
1966 RTE_FLOW_ERROR_TYPE_ITEM,
1967 item, "Not supported by fdir filter");
1972 /* Get the TCP info. */
1973 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1975 * Set the flow type even if there's no content
1976 * as we must have a flow type.
1978 rule->ixgbe_fdir.formatted.flow_type |=
1979 IXGBE_ATR_L4TYPE_TCP;
1980 /*Not supported last point for range*/
1982 rte_flow_error_set(error, EINVAL,
1983 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984 item, "Not supported last point for range");
1988 * Only care about src & dst ports,
1989 * others should be masked.
1992 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1993 rte_flow_error_set(error, EINVAL,
1994 RTE_FLOW_ERROR_TYPE_ITEM,
1995 item, "Not supported by fdir filter");
1998 rule->b_mask = TRUE;
1999 tcp_mask = item->mask;
2000 if (tcp_mask->hdr.sent_seq ||
2001 tcp_mask->hdr.recv_ack ||
2002 tcp_mask->hdr.data_off ||
2003 tcp_mask->hdr.tcp_flags ||
2004 tcp_mask->hdr.rx_win ||
2005 tcp_mask->hdr.cksum ||
2006 tcp_mask->hdr.tcp_urp) {
2007 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2008 rte_flow_error_set(error, EINVAL,
2009 RTE_FLOW_ERROR_TYPE_ITEM,
2010 item, "Not supported by fdir filter");
2013 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
2014 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
2017 rule->b_spec = TRUE;
2018 tcp_spec = item->spec;
2019 rule->ixgbe_fdir.formatted.src_port =
2020 tcp_spec->hdr.src_port;
2021 rule->ixgbe_fdir.formatted.dst_port =
2022 tcp_spec->hdr.dst_port;
2025 item = next_no_fuzzy_pattern(pattern, item);
2026 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2027 item->type != RTE_FLOW_ITEM_TYPE_END) {
2028 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2029 rte_flow_error_set(error, EINVAL,
2030 RTE_FLOW_ERROR_TYPE_ITEM,
2031 item, "Not supported by fdir filter");
2037 /* Get the UDP info */
2038 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2040 * Set the flow type even if there's no content
2041 * as we must have a flow type.
2043 rule->ixgbe_fdir.formatted.flow_type |=
2044 IXGBE_ATR_L4TYPE_UDP;
2045 /*Not supported last point for range*/
2047 rte_flow_error_set(error, EINVAL,
2048 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2049 item, "Not supported last point for range");
2053 * Only care about src & dst ports,
2054 * others should be masked.
2057 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2058 rte_flow_error_set(error, EINVAL,
2059 RTE_FLOW_ERROR_TYPE_ITEM,
2060 item, "Not supported by fdir filter");
2063 rule->b_mask = TRUE;
2064 udp_mask = item->mask;
2065 if (udp_mask->hdr.dgram_len ||
2066 udp_mask->hdr.dgram_cksum) {
2067 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2068 rte_flow_error_set(error, EINVAL,
2069 RTE_FLOW_ERROR_TYPE_ITEM,
2070 item, "Not supported by fdir filter");
2073 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2074 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2077 rule->b_spec = TRUE;
2078 udp_spec = item->spec;
2079 rule->ixgbe_fdir.formatted.src_port =
2080 udp_spec->hdr.src_port;
2081 rule->ixgbe_fdir.formatted.dst_port =
2082 udp_spec->hdr.dst_port;
2085 item = next_no_fuzzy_pattern(pattern, item);
2086 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2087 item->type != RTE_FLOW_ITEM_TYPE_END) {
2088 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2089 rte_flow_error_set(error, EINVAL,
2090 RTE_FLOW_ERROR_TYPE_ITEM,
2091 item, "Not supported by fdir filter");
2097 /* Get the SCTP info */
2098 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2100 * Set the flow type even if there's no content
2101 * as we must have a flow type.
2103 rule->ixgbe_fdir.formatted.flow_type |=
2104 IXGBE_ATR_L4TYPE_SCTP;
2105 /*Not supported last point for range*/
2107 rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2109 item, "Not supported last point for range");
2113 /* only the x550 family supports the sctp port */
2114 if (hw->mac.type == ixgbe_mac_X550 ||
2115 hw->mac.type == ixgbe_mac_X550EM_x ||
2116 hw->mac.type == ixgbe_mac_X550EM_a) {
2118 * Only care about src & dst ports,
2119 * others should be masked.
2122 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2123 rte_flow_error_set(error, EINVAL,
2124 RTE_FLOW_ERROR_TYPE_ITEM,
2125 item, "Not supported by fdir filter");
2128 rule->b_mask = TRUE;
2129 sctp_mask = item->mask;
2130 if (sctp_mask->hdr.tag ||
2131 sctp_mask->hdr.cksum) {
2132 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2133 rte_flow_error_set(error, EINVAL,
2134 RTE_FLOW_ERROR_TYPE_ITEM,
2135 item, "Not supported by fdir filter");
2138 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2139 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2142 rule->b_spec = TRUE;
2143 sctp_spec = item->spec;
2144 rule->ixgbe_fdir.formatted.src_port =
2145 sctp_spec->hdr.src_port;
2146 rule->ixgbe_fdir.formatted.dst_port =
2147 sctp_spec->hdr.dst_port;
2149 /* for other MAC types, even the sctp port is not supported */
2151 sctp_mask = item->mask;
2153 (sctp_mask->hdr.src_port ||
2154 sctp_mask->hdr.dst_port ||
2155 sctp_mask->hdr.tag ||
2156 sctp_mask->hdr.cksum)) {
2157 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2158 rte_flow_error_set(error, EINVAL,
2159 RTE_FLOW_ERROR_TYPE_ITEM,
2160 item, "Not supported by fdir filter");
2165 item = next_no_fuzzy_pattern(pattern, item);
2166 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2167 item->type != RTE_FLOW_ITEM_TYPE_END) {
2168 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2169 rte_flow_error_set(error, EINVAL,
2170 RTE_FLOW_ERROR_TYPE_ITEM,
2171 item, "Not supported by fdir filter");
2176 /* Get the flex byte info */
2177 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2178 /* Not supported last point for range*/
2180 rte_flow_error_set(error, EINVAL,
2181 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2182 item, "Not supported last point for range");
2185 /* mask should not be null */
2186 if (!item->mask || !item->spec) {
2187 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2188 rte_flow_error_set(error, EINVAL,
2189 RTE_FLOW_ERROR_TYPE_ITEM,
2190 item, "Not supported by fdir filter");
2194 raw_mask = item->mask;
2197 if (raw_mask->relative != 0x1 ||
2198 raw_mask->search != 0x1 ||
2199 raw_mask->reserved != 0x0 ||
2200 (uint32_t)raw_mask->offset != 0xffffffff ||
2201 raw_mask->limit != 0xffff ||
2202 raw_mask->length != 0xffff) {
2203 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2204 rte_flow_error_set(error, EINVAL,
2205 RTE_FLOW_ERROR_TYPE_ITEM,
2206 item, "Not supported by fdir filter");
2210 raw_spec = item->spec;
2213 if (raw_spec->relative != 0 ||
2214 raw_spec->search != 0 ||
2215 raw_spec->reserved != 0 ||
2216 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2217 raw_spec->offset % 2 ||
2218 raw_spec->limit != 0 ||
2219 raw_spec->length != 2 ||
2220 /* pattern can't be 0xffff */
2221 (raw_spec->pattern[0] == 0xff &&
2222 raw_spec->pattern[1] == 0xff)) {
2223 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2224 rte_flow_error_set(error, EINVAL,
2225 RTE_FLOW_ERROR_TYPE_ITEM,
2226 item, "Not supported by fdir filter");
2230 /* check pattern mask */
2231 if (raw_mask->pattern[0] != 0xff ||
2232 raw_mask->pattern[1] != 0xff) {
2233 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2234 rte_flow_error_set(error, EINVAL,
2235 RTE_FLOW_ERROR_TYPE_ITEM,
2236 item, "Not supported by fdir filter");
2240 rule->mask.flex_bytes_mask = 0xffff;
2241 rule->ixgbe_fdir.formatted.flex_bytes =
2242 (((uint16_t)raw_spec->pattern[1]) << 8) |
2243 raw_spec->pattern[0];
2244 rule->flex_bytes_offset = raw_spec->offset;
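/*
 * Illustrative example (not part of the driver): a RAW spec/mask pair that
 * passes the checks above. Two flex bytes are matched at an even offset, and
 * the mask fields are set exactly as required. The concrete values are
 * assumptions for documentation only.
 *
 *	struct rte_flow_item_raw raw_spec = {
 *		.relative = 0, .search = 0, .offset = 12,
 *		.limit = 0, .length = 2,
 *		.pattern = (const uint8_t []){ 0x08, 0x00 },
 *	};
 *	struct rte_flow_item_raw raw_mask = {
 *		.relative = 1, .search = 1, .offset = 0xffffffff,
 *		.limit = 0xffff, .length = 0xffff,
 *		.pattern = (const uint8_t []){ 0xff, 0xff },
 *	};
 */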
2247 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2248 /* check if the next not void item is END */
2249 item = next_no_fuzzy_pattern(pattern, item);
2250 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2251 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2252 rte_flow_error_set(error, EINVAL,
2253 RTE_FLOW_ERROR_TYPE_ITEM,
2254 item, "Not supported by fdir filter");
2259 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2262 #define NVGRE_PROTOCOL 0x6558
2264 /**
2265 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule,
2266 * and extract the flow director filter info along the way.
2267 * VxLAN PATTERN:
2268 * The first not void item must be ETH.
2269 * The second not void item must be IPV4 or IPV6.
2270 * The third not void item must be UDP, followed by VXLAN.
2271 * The next not void item must be END.
2272 * NVGRE PATTERN:
2273 * The first not void item must be ETH.
2274 * The second not void item must be IPV4 or IPV6.
2275 * The third not void item must be NVGRE.
2276 * The next not void item must be END.
2277 * ACTION:
2278 * The first not void action should be QUEUE or DROP.
2279 * The second not void optional action should be MARK,
2280 * mark_id is a uint32_t number.
2281 * The next not void action should be END.
2282 * VxLAN pattern example:
2283 * ITEM         Spec                            Mask
2284 * ETH          NULL                            NULL
2285 * IPV4/IPV6    NULL                            NULL
2286 * UDP          NULL                            NULL
2287 * VxLAN        vni{0x00, 0x32, 0x54}           {0xFF, 0xFF, 0xFF}
2288 * MAC VLAN     tci 0x2016                      0xEFFF
2289 * END
2290 * NVGRE pattern example:
2291 * ITEM         Spec                            Mask
2292 * ETH          NULL                            NULL
2293 * IPV4/IPV6    NULL                            NULL
2294 * NVGRE        protocol 0x6558                 0xFFFF
2295 *              tni{0x00, 0x32, 0x54}           {0xFF, 0xFF, 0xFF}
2296 * MAC VLAN     tci 0x2016                      0xEFFF
2297 * END
2298 * Other members in mask and spec should be set to 0x00.
2299 * item->last should be NULL.
2300 */
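/*
 * Illustrative sketch (not part of the driver): pattern/action arrays an
 * application might pass through rte_flow to reach this VxLAN tunnel parser,
 * assuming the port's fdir_conf.mode is RTE_FDIR_MODE_PERFECT_TUNNEL. All
 * concrete values (VNI, inner MAC, TCI, queue index) are documentation-only
 * assumptions; an analogous NVGRE rule replaces the UDP and VXLAN items with
 * a single NVGRE item.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x02, 0x00, 0x00, 0x00, 0x00, 0x01 } };
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xEFFF) };
 *	struct rte_flow_action_queue queue_conf = { .index = 1 };
 *
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */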
2302 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2303 const struct rte_flow_item pattern[],
2304 const struct rte_flow_action actions[],
2305 struct ixgbe_fdir_rule *rule,
2306 struct rte_flow_error *error)
2308 const struct rte_flow_item *item;
2309 const struct rte_flow_item_vxlan *vxlan_spec;
2310 const struct rte_flow_item_vxlan *vxlan_mask;
2311 const struct rte_flow_item_nvgre *nvgre_spec;
2312 const struct rte_flow_item_nvgre *nvgre_mask;
2313 const struct rte_flow_item_eth *eth_spec;
2314 const struct rte_flow_item_eth *eth_mask;
2315 const struct rte_flow_item_vlan *vlan_spec;
2316 const struct rte_flow_item_vlan *vlan_mask;
2320 rte_flow_error_set(error, EINVAL,
2321 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2322 NULL, "NULL pattern.");
2327 rte_flow_error_set(error, EINVAL,
2328 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2329 NULL, "NULL action.");
2334 rte_flow_error_set(error, EINVAL,
2335 RTE_FLOW_ERROR_TYPE_ATTR,
2336 NULL, "NULL attribute.");
2341 * Some fields may not be provided. Set the spec to 0 and the mask to its
2342 * default value, so nothing needs to be done later for fields that are not provided.
2344 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2345 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2346 rule->mask.vlan_tci_mask = 0;
2349 * The first not void item should be
2350 * MAC or IPv4 or IPv6 or UDP or VxLAN.
2352 item = next_no_void_pattern(pattern, NULL);
2353 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2354 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2355 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2356 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2357 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2358 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2359 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2360 rte_flow_error_set(error, EINVAL,
2361 RTE_FLOW_ERROR_TYPE_ITEM,
2362 item, "Not supported by fdir filter");
2366 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2369 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2370 /* Only used to describe the protocol stack. */
2371 if (item->spec || item->mask) {
2372 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2373 rte_flow_error_set(error, EINVAL,
2374 RTE_FLOW_ERROR_TYPE_ITEM,
2375 item, "Not supported by fdir filter");
2378 /* Not supported last point for range */
2380 rte_flow_error_set(error, EINVAL,
2381 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2382 item, "Not supported last point for range");
2386 /* Check if the next not void item is IPv4 or IPv6. */
2387 item = next_no_void_pattern(pattern, item);
2388 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2389 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2390 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2391 rte_flow_error_set(error, EINVAL,
2392 RTE_FLOW_ERROR_TYPE_ITEM,
2393 item, "Not supported by fdir filter");
2399 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2400 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2401 /* Only used to describe the protocol stack. */
2402 if (item->spec || item->mask) {
2403 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2404 rte_flow_error_set(error, EINVAL,
2405 RTE_FLOW_ERROR_TYPE_ITEM,
2406 item, "Not supported by fdir filter");
2409 /* Not supported last point for range */
2411 rte_flow_error_set(error, EINVAL,
2412 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2413 item, "Not supported last point for range");
2417 /* Check if the next not void item is UDP or NVGRE. */
2418 item = next_no_void_pattern(pattern, item);
2419 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2420 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2421 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2422 rte_flow_error_set(error, EINVAL,
2423 RTE_FLOW_ERROR_TYPE_ITEM,
2424 item, "Not supported by fdir filter");
2430 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2431 /* Only used to describe the protocol stack. */
2432 if (item->spec || item->mask) {
2433 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2434 rte_flow_error_set(error, EINVAL,
2435 RTE_FLOW_ERROR_TYPE_ITEM,
2436 item, "Not supported by fdir filter");
2439 /* Not supported last point for range */
2441 rte_flow_error_set(error, EINVAL,
2442 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2443 item, "Not supported last point for range");
2447 /* Check if the next not void item is VxLAN. */
2448 item = next_no_void_pattern(pattern, item);
2449 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2450 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2451 rte_flow_error_set(error, EINVAL,
2452 RTE_FLOW_ERROR_TYPE_ITEM,
2453 item, "Not supported by fdir filter");
2458 /* Get the VxLAN info */
2459 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2460 rule->ixgbe_fdir.formatted.tunnel_type =
2461 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2463 /* Only care about VNI, others should be masked. */
2465 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2466 rte_flow_error_set(error, EINVAL,
2467 RTE_FLOW_ERROR_TYPE_ITEM,
2468 item, "Not supported by fdir filter");
2471 /* Not supported last point for range */
2473 rte_flow_error_set(error, EINVAL,
2474 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2475 item, "Not supported last point for range");
2478 rule->b_mask = TRUE;
2480 /* Tunnel type is always meaningful. */
2481 rule->mask.tunnel_type_mask = 1;
2483 vxlan_mask = item->mask;
2484 if (vxlan_mask->flags) {
2485 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2486 rte_flow_error_set(error, EINVAL,
2487 RTE_FLOW_ERROR_TYPE_ITEM,
2488 item, "Not supported by fdir filter");
2491 /* The VNI must be either fully masked or not masked at all. */
2492 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2493 vxlan_mask->vni[2]) &&
2494 ((vxlan_mask->vni[0] != 0xFF) ||
2495 (vxlan_mask->vni[1] != 0xFF) ||
2496 (vxlan_mask->vni[2] != 0xFF))) {
2497 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2498 rte_flow_error_set(error, EINVAL,
2499 RTE_FLOW_ERROR_TYPE_ITEM,
2500 item, "Not supported by fdir filter");
2504 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2505 RTE_DIM(vxlan_mask->vni));
2508 rule->b_spec = TRUE;
2509 vxlan_spec = item->spec;
2510 rte_memcpy(((uint8_t *)
2511 &rule->ixgbe_fdir.formatted.tni_vni),
2512 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2516 /* Get the NVGRE info */
2517 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2518 rule->ixgbe_fdir.formatted.tunnel_type =
2519 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2522 * Only the version/flags field (c_k_s_rsvd0_ver), the protocol and the TNI
2523 * are of interest; everything else should be masked out.
2526 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2527 rte_flow_error_set(error, EINVAL,
2528 RTE_FLOW_ERROR_TYPE_ITEM,
2529 item, "Not supported by fdir filter");
2532 /* Not supported last point for range */
2534 rte_flow_error_set(error, EINVAL,
2535 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2536 item, "Not supported last point for range");
2539 rule->b_mask = TRUE;
2541 /* Tunnel type is always meaningful. */
2542 rule->mask.tunnel_type_mask = 1;
2544 nvgre_mask = item->mask;
2545 if (nvgre_mask->flow_id) {
2546 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2547 rte_flow_error_set(error, EINVAL,
2548 RTE_FLOW_ERROR_TYPE_ITEM,
2549 item, "Not supported by fdir filter");
2552 if (nvgre_mask->protocol &&
2553 nvgre_mask->protocol != 0xFFFF) {
2554 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2555 rte_flow_error_set(error, EINVAL,
2556 RTE_FLOW_ERROR_TYPE_ITEM,
2557 item, "Not supported by fdir filter");
2560 if (nvgre_mask->c_k_s_rsvd0_ver &&
2561 nvgre_mask->c_k_s_rsvd0_ver !=
2562 rte_cpu_to_be_16(0xFFFF)) {
2563 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564 rte_flow_error_set(error, EINVAL,
2565 RTE_FLOW_ERROR_TYPE_ITEM,
2566 item, "Not supported by fdir filter");
2569 /* The TNI must be either fully masked or not masked at all. */
2570 if (nvgre_mask->tni[0] &&
2571 ((nvgre_mask->tni[0] != 0xFF) ||
2572 (nvgre_mask->tni[1] != 0xFF) ||
2573 (nvgre_mask->tni[2] != 0xFF))) {
2574 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2575 rte_flow_error_set(error, EINVAL,
2576 RTE_FLOW_ERROR_TYPE_ITEM,
2577 item, "Not supported by fdir filter");
2580 /* The TNI is a 24-bit field. */
2581 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2582 RTE_DIM(nvgre_mask->tni));
2583 rule->mask.tunnel_id_mask <<= 8;
2586 rule->b_spec = TRUE;
2587 nvgre_spec = item->spec;
2588 if (nvgre_spec->c_k_s_rsvd0_ver !=
2589 rte_cpu_to_be_16(0x2000) &&
2590 nvgre_mask->c_k_s_rsvd0_ver) {
2591 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2592 rte_flow_error_set(error, EINVAL,
2593 RTE_FLOW_ERROR_TYPE_ITEM,
2594 item, "Not supported by fdir filter");
2597 if (nvgre_mask->protocol &&
2598 nvgre_spec->protocol !=
2599 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2600 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2601 rte_flow_error_set(error, EINVAL,
2602 RTE_FLOW_ERROR_TYPE_ITEM,
2603 item, "Not supported by fdir filter");
2606 /* The TNI is a 24-bit field. */
2607 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2608 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2612 /* check if the next not void item is MAC */
2613 item = next_no_void_pattern(pattern, item);
2614 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2615 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2616 rte_flow_error_set(error, EINVAL,
2617 RTE_FLOW_ERROR_TYPE_ITEM,
2618 item, "Not supported by fdir filter");
2623 * Only support vlan and dst MAC address,
2624 * others should be masked.
2628 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2629 rte_flow_error_set(error, EINVAL,
2630 RTE_FLOW_ERROR_TYPE_ITEM,
2631 item, "Not supported by fdir filter");
2634 /* Not supported last point for range */
2636 rte_flow_error_set(error, EINVAL,
2637 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2638 item, "Not supported last point for range");
2641 rule->b_mask = TRUE;
2642 eth_mask = item->mask;
2644 /* The Ether type must be masked out (not matched). */
2645 if (eth_mask->type) {
2646 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2647 rte_flow_error_set(error, EINVAL,
2648 RTE_FLOW_ERROR_TYPE_ITEM,
2649 item, "Not supported by fdir filter");
2653 /* The source MAC address must be masked out (not matched). */
2654 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2655 if (eth_mask->src.addr_bytes[j]) {
2657 sizeof(struct ixgbe_fdir_rule));
2658 rte_flow_error_set(error, EINVAL,
2659 RTE_FLOW_ERROR_TYPE_ITEM,
2660 item, "Not supported by fdir filter");
2664 rule->mask.mac_addr_byte_mask = 0;
2665 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2666 /* It's a per byte mask. */
2667 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2668 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2669 } else if (eth_mask->dst.addr_bytes[j]) {
2670 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2671 rte_flow_error_set(error, EINVAL,
2672 RTE_FLOW_ERROR_TYPE_ITEM,
2673 item, "Not supported by fdir filter");
2678 /* When there is no VLAN item, treat the VLAN TCI as fully masked. */
2679 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2682 rule->b_spec = TRUE;
2683 eth_spec = item->spec;
2685 /* Get the dst MAC. */
2686 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2687 rule->ixgbe_fdir.formatted.inner_mac[j] =
2688 eth_spec->dst.addr_bytes[j];
2693 * Check if the next not void item is vlan or ipv4.
2694 * IPv6 is not supported.
2696 item = next_no_void_pattern(pattern, item);
2697 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2698 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2699 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2700 rte_flow_error_set(error, EINVAL,
2701 RTE_FLOW_ERROR_TYPE_ITEM,
2702 item, "Not supported by fdir filter");
2705 /* Not supported last point for range */
2707 rte_flow_error_set(error, EINVAL,
2708 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2709 item, "Not supported last point for range");
2713 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2714 if (!(item->spec && item->mask)) {
2715 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2716 rte_flow_error_set(error, EINVAL,
2717 RTE_FLOW_ERROR_TYPE_ITEM,
2718 item, "Not supported by fdir filter");
2722 vlan_spec = item->spec;
2723 vlan_mask = item->mask;
2725 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2727 rule->mask.vlan_tci_mask = vlan_mask->tci;
2728 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2729 /* More than one VLAN tag is not supported. */
2731 /* check if the next not void item is END */
2732 item = next_no_void_pattern(pattern, item);
2734 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2735 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2736 rte_flow_error_set(error, EINVAL,
2737 RTE_FLOW_ERROR_TYPE_ITEM,
2738 item, "Not supported by fdir filter");
2744 * If the TCI is 0, the VLAN is a "don't care"; nothing more needs to be done.
2748 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2752 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2753 const struct rte_flow_attr *attr,
2754 const struct rte_flow_item pattern[],
2755 const struct rte_flow_action actions[],
2756 struct ixgbe_fdir_rule *rule,
2757 struct rte_flow_error *error)
2760 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2761 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2763 if (hw->mac.type != ixgbe_mac_82599EB &&
2764 hw->mac.type != ixgbe_mac_X540 &&
2765 hw->mac.type != ixgbe_mac_X550 &&
2766 hw->mac.type != ixgbe_mac_X550EM_x &&
2767 hw->mac.type != ixgbe_mac_X550EM_a)
2770 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2771 actions, rule, error);
2776 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2777 actions, rule, error);
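/*
 * On 82599, a flow director drop rule that also matches a non-zero L4
 * source or destination port is not supported and is rejected here.
 */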
2784 if (hw->mac.type == ixgbe_mac_82599EB &&
2785 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2786 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2787 rule->ixgbe_fdir.formatted.dst_port != 0))
2790 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2791 fdir_mode != rule->mode)
2794 if (rule->queue >= dev->data->nb_rx_queues)
2801 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2802 const struct rte_flow_attr *attr,
2803 const struct rte_flow_action actions[],
2804 struct ixgbe_rte_flow_rss_conf *rss_conf,
2805 struct rte_flow_error *error)
2807 const struct rte_flow_action *act;
2808 const struct rte_flow_action_rss *rss;
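	/*
	 * Illustrative example (not part of the driver): an RSS action
	 * configuration that satisfies the checks below, using the default
	 * hash function, level 0, a 40-byte key and a short queue list. The
	 * names and values are assumptions for documentation only; a real
	 * application would supply its own 40-byte hash key.
	 *
	 *	static const uint16_t example_queues[] = { 0, 1, 2, 3 };
	 *	static const uint8_t example_key[40] = { 0 };
	 *	const struct rte_flow_action_rss example_rss = {
	 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
	 *		.level = 0,
	 *		.types = RTE_ETH_RSS_IP,
	 *		.key_len = sizeof(example_key),
	 *		.key = example_key,
	 *		.queue_num = RTE_DIM(example_queues),
	 *		.queue = example_queues,
	 *	};
	 */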
2812 * RSS only supports forwarding;
2813 * check that the first not void action is RSS.
2815 act = next_no_void_action(actions, NULL);
2816 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2817 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2818 rte_flow_error_set(error, EINVAL,
2819 RTE_FLOW_ERROR_TYPE_ACTION,
2820 act, "Not supported action.");
2824 rss = (const struct rte_flow_action_rss *)act->conf;
2826 if (!rss || !rss->queue_num) {
2827 rte_flow_error_set(error, EINVAL,
2828 RTE_FLOW_ERROR_TYPE_ACTION,
2834 for (n = 0; n < rss->queue_num; n++) {
2835 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2836 rte_flow_error_set(error, EINVAL,
2837 RTE_FLOW_ERROR_TYPE_ACTION,
2839 "queue id > max number of queues");
2844 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2845 return rte_flow_error_set
2846 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2847 "non-default RSS hash functions are not supported");
2849 return rte_flow_error_set
2850 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2851 "a nonzero RSS encapsulation level is not supported");
2852 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2853 return rte_flow_error_set
2854 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2855 "RSS hash key must be exactly 40 bytes");
2856 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2857 return rte_flow_error_set
2858 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2859 "too many queues for RSS context");
2860 if (ixgbe_rss_conf_init(rss_conf, rss))
2861 return rte_flow_error_set
2862 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2863 "RSS context initialization failure");
2865 /* Check that the next not void action is END. */
2866 act = next_no_void_action(actions, act);
2867 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2868 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2869 rte_flow_error_set(error, EINVAL,
2870 RTE_FLOW_ERROR_TYPE_ACTION,
2871 act, "Not supported action.");
2876 /* must be input direction */
2877 if (!attr->ingress) {
2878 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2879 rte_flow_error_set(error, EINVAL,
2880 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2881 attr, "Only support ingress.");
2887 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2888 rte_flow_error_set(error, EINVAL,
2889 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2890 attr, "Not support egress.");
2895 if (attr->transfer) {
2896 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2897 rte_flow_error_set(error, EINVAL,
2898 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2899 attr, "No support for transfer.");
2903 if (attr->priority > 0xFFFF) {
2904 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2905 rte_flow_error_set(error, EINVAL,
2906 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2907 attr, "Error priority.");
2914 /* remove the rss filter */
2916 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2918 struct ixgbe_filter_info *filter_info =
2919 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2921 if (filter_info->rss_info.conf.queue_num)
2922 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2926 ixgbe_filterlist_init(void)
2928 TAILQ_INIT(&filter_ntuple_list);
2929 TAILQ_INIT(&filter_ethertype_list);
2930 TAILQ_INIT(&filter_syn_list);
2931 TAILQ_INIT(&filter_fdir_list);
2932 TAILQ_INIT(&filter_l2_tunnel_list);
2933 TAILQ_INIT(&filter_rss_list);
2934 TAILQ_INIT(&ixgbe_flow_list);
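/*
 * Release every element on all the software filter lists, including the
 * flow handles tracked in ixgbe_flow_list.
 */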
2938 ixgbe_filterlist_flush(void)
2940 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2941 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2942 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2943 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2944 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2945 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2946 struct ixgbe_rss_conf_ele *rss_filter_ptr;
2948 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2949 TAILQ_REMOVE(&filter_ntuple_list,
2952 rte_free(ntuple_filter_ptr);
2955 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2956 TAILQ_REMOVE(&filter_ethertype_list,
2957 ethertype_filter_ptr,
2959 rte_free(ethertype_filter_ptr);
2962 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2963 TAILQ_REMOVE(&filter_syn_list,
2966 rte_free(syn_filter_ptr);
2969 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2970 TAILQ_REMOVE(&filter_l2_tunnel_list,
2973 rte_free(l2_tn_filter_ptr);
2976 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2977 TAILQ_REMOVE(&filter_fdir_list,
2980 rte_free(fdir_rule_ptr);
2983 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2984 TAILQ_REMOVE(&filter_rss_list,
2987 rte_free(rss_filter_ptr);
2990 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
2991 TAILQ_REMOVE(&ixgbe_flow_list,
2994 rte_free(ixgbe_flow_mem_ptr->flow);
2995 rte_free(ixgbe_flow_mem_ptr);
3000 * Create a flow rule.
3001 * Theoretically one rule can match more than one filter type.
3002 * We let it use the first filter type whose parser accepts it, so the order
3003 * below (ntuple, ethertype, SYN, flow director, L2 tunnel, RSS) matters.
3005 static struct rte_flow *
3006 ixgbe_flow_create(struct rte_eth_dev *dev,
3007 const struct rte_flow_attr *attr,
3008 const struct rte_flow_item pattern[],
3009 const struct rte_flow_action actions[],
3010 struct rte_flow_error *error)
3013 struct rte_eth_ntuple_filter ntuple_filter;
3014 struct rte_eth_ethertype_filter ethertype_filter;
3015 struct rte_eth_syn_filter syn_filter;
3016 struct ixgbe_fdir_rule fdir_rule;
3017 struct ixgbe_l2_tunnel_conf l2_tn_filter;
3018 struct ixgbe_hw_fdir_info *fdir_info =
3019 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3020 struct ixgbe_rte_flow_rss_conf rss_conf;
3021 struct rte_flow *flow = NULL;
3022 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3023 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3024 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3025 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3026 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3027 struct ixgbe_rss_conf_ele *rss_filter_ptr;
3028 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3029 uint8_t first_mask = FALSE;
3031 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3033 PMD_DRV_LOG(ERR, "failed to allocate memory");
3034 return (struct rte_flow *)flow;
3036 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3037 sizeof(struct ixgbe_flow_mem), 0);
3038 if (!ixgbe_flow_mem_ptr) {
3039 PMD_DRV_LOG(ERR, "failed to allocate memory");
3043 ixgbe_flow_mem_ptr->flow = flow;
3044 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3045 ixgbe_flow_mem_ptr, entries);
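	/*
	 * Try each filter parser in turn; the first one that accepts the
	 * pattern and actions programs the hardware and links the rule into
	 * its per-type software list.
	 */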
3047 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3048 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3049 actions, &ntuple_filter, error);
3051 #ifdef RTE_LIB_SECURITY
3052 /* An ESP flow is not really a flow; return the handle without programming a filter. */
3053 if (ntuple_filter.proto == IPPROTO_ESP)
3058 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3060 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3061 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3062 if (!ntuple_filter_ptr) {
3063 PMD_DRV_LOG(ERR, "failed to allocate memory");
3066 rte_memcpy(&ntuple_filter_ptr->filter_info,
3068 sizeof(struct rte_eth_ntuple_filter));
3069 TAILQ_INSERT_TAIL(&filter_ntuple_list,
3070 ntuple_filter_ptr, entries);
3071 flow->rule = ntuple_filter_ptr;
3072 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3078 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3079 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3080 actions, ðertype_filter, error);
3082 ret = ixgbe_add_del_ethertype_filter(dev,
3083 ðertype_filter, TRUE);
3085 ethertype_filter_ptr = rte_zmalloc(
3086 "ixgbe_ethertype_filter",
3087 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3088 if (!ethertype_filter_ptr) {
3089 PMD_DRV_LOG(ERR, "failed to allocate memory");
3092 rte_memcpy(ðertype_filter_ptr->filter_info,
3094 sizeof(struct rte_eth_ethertype_filter));
3095 TAILQ_INSERT_TAIL(&filter_ethertype_list,
3096 ethertype_filter_ptr, entries);
3097 flow->rule = ethertype_filter_ptr;
3098 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3104 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3105 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3106 actions, &syn_filter, error);
3108 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3110 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3111 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3112 if (!syn_filter_ptr) {
3113 PMD_DRV_LOG(ERR, "failed to allocate memory");
3116 rte_memcpy(&syn_filter_ptr->filter_info,
3118 sizeof(struct rte_eth_syn_filter));
3119 TAILQ_INSERT_TAIL(&filter_syn_list,
3122 flow->rule = syn_filter_ptr;
3123 flow->filter_type = RTE_ETH_FILTER_SYN;
3129 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3130 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3131 actions, &fdir_rule, error);
3133 /* A mask cannot be deleted. */
3134 if (fdir_rule.b_mask) {
3135 if (!fdir_info->mask_added) {
3136 /* It's the first time the mask is set. */
3137 rte_memcpy(&fdir_info->mask,
3139 sizeof(struct ixgbe_hw_fdir_mask));
3141 if (fdir_rule.mask.flex_bytes_mask) {
3142 ret = ixgbe_fdir_set_flexbytes_offset(dev,
3143 fdir_rule.flex_bytes_offset);
3147 ret = ixgbe_fdir_set_input_mask(dev);
3151 fdir_info->mask_added = TRUE;
3155 * Only one global mask is supported;
3156 * all rules must use the same mask.
3158 ret = memcmp(&fdir_info->mask,
3160 sizeof(struct ixgbe_hw_fdir_mask));
3164 if (fdir_rule.mask.flex_bytes_mask &&
3165 fdir_info->flex_bytes_offset !=
3166 fdir_rule.flex_bytes_offset)
3171 if (fdir_rule.b_spec) {
3172 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3175 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3176 sizeof(struct ixgbe_fdir_rule_ele), 0);
3177 if (!fdir_rule_ptr) {
3178 PMD_DRV_LOG(ERR, "failed to allocate memory");
3181 rte_memcpy(&fdir_rule_ptr->filter_info,
3183 sizeof(struct ixgbe_fdir_rule));
3184 TAILQ_INSERT_TAIL(&filter_fdir_list,
3185 fdir_rule_ptr, entries);
3186 flow->rule = fdir_rule_ptr;
3187 flow->filter_type = RTE_ETH_FILTER_FDIR;
3194 * Clear the mask_added flag if we failed to program the rule that first set it.
3198 fdir_info->mask_added = FALSE;
3206 memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
3207 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3208 actions, &l2_tn_filter, error);
3210 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3212 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3213 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3214 if (!l2_tn_filter_ptr) {
3215 PMD_DRV_LOG(ERR, "failed to allocate memory");
3218 rte_memcpy(&l2_tn_filter_ptr->filter_info,
3220 sizeof(struct ixgbe_l2_tunnel_conf));
3221 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3222 l2_tn_filter_ptr, entries);
3223 flow->rule = l2_tn_filter_ptr;
3224 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3229 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3230 ret = ixgbe_parse_rss_filter(dev, attr,
3231 actions, &rss_conf, error);
3233 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3235 rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3236 sizeof(struct ixgbe_rss_conf_ele), 0);
3237 if (!rss_filter_ptr) {
3238 PMD_DRV_LOG(ERR, "failed to allocate memory");
3241 ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3243 TAILQ_INSERT_TAIL(&filter_rss_list,
3244 rss_filter_ptr, entries);
3245 flow->rule = rss_filter_ptr;
3246 flow->filter_type = RTE_ETH_FILTER_HASH;
3252 TAILQ_REMOVE(&ixgbe_flow_list,
3253 ixgbe_flow_mem_ptr, entries);
3254 rte_flow_error_set(error, -ret,
3255 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3256 "Failed to create flow.");
3257 rte_free(ixgbe_flow_mem_ptr);
3263 * Check if the flow rule is supported by ixgbe.
3264 * It only checks the format; it does not guarantee that the rule can be
3265 * programmed into the HW, e.g. because there may not be enough room for it.
3266 */
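/*
 * Illustrative usage sketch (not part of the driver): applications reach the
 * rte_flow ops implemented in this file through the generic rte_flow API,
 * for example (pattern/action construction and error handling omitted):
 *
 *	struct rte_flow_error err;
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *		struct rte_flow *f = rte_flow_create(port_id, &attr,
 *						     pattern, actions, &err);
 *		...
 *	}
 */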
3268 ixgbe_flow_validate(struct rte_eth_dev *dev,
3269 const struct rte_flow_attr *attr,
3270 const struct rte_flow_item pattern[],
3271 const struct rte_flow_action actions[],
3272 struct rte_flow_error *error)
3274 struct rte_eth_ntuple_filter ntuple_filter;
3275 struct rte_eth_ethertype_filter ethertype_filter;
3276 struct rte_eth_syn_filter syn_filter;
3277 struct ixgbe_l2_tunnel_conf l2_tn_filter;
3278 struct ixgbe_fdir_rule fdir_rule;
3279 struct ixgbe_rte_flow_rss_conf rss_conf;
3282 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3283 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3284 actions, &ntuple_filter, error);
3288 memset(ðertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3289 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3290 actions, ðertype_filter, error);
3294 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3295 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3296 actions, &syn_filter, error);
3300 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3301 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3302 actions, &fdir_rule, error);
3306 memset(&l2_tn_filter, 0, sizeof(struct ixgbe_l2_tunnel_conf));
3307 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3308 actions, &l2_tn_filter, error);
3312 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3313 ret = ixgbe_parse_rss_filter(dev, attr,
3314 actions, &rss_conf, error);
3319 /* Destroy a flow rule on ixgbe. */
3321 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3322 struct rte_flow *flow,
3323 struct rte_flow_error *error)
3326 struct rte_flow *pmd_flow = flow;
3327 enum rte_filter_type filter_type = pmd_flow->filter_type;
3328 struct rte_eth_ntuple_filter ntuple_filter;
3329 struct rte_eth_ethertype_filter ethertype_filter;
3330 struct rte_eth_syn_filter syn_filter;
3331 struct ixgbe_fdir_rule fdir_rule;
3332 struct ixgbe_l2_tunnel_conf l2_tn_filter;
3333 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3334 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3335 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3336 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3337 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3338 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3339 struct ixgbe_hw_fdir_info *fdir_info =
3340 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3341 struct ixgbe_rss_conf_ele *rss_filter_ptr;
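	/*
	 * For each filter type, remove the filter from the hardware first;
	 * only if that succeeds is the rule unlinked from its software list
	 * and freed.
	 */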
3343 switch (filter_type) {
3344 case RTE_ETH_FILTER_NTUPLE:
3345 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3347 rte_memcpy(&ntuple_filter,
3348 &ntuple_filter_ptr->filter_info,
3349 sizeof(struct rte_eth_ntuple_filter));
3350 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3352 TAILQ_REMOVE(&filter_ntuple_list,
3353 ntuple_filter_ptr, entries);
3354 rte_free(ntuple_filter_ptr);
3357 case RTE_ETH_FILTER_ETHERTYPE:
3358 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3360 rte_memcpy(ðertype_filter,
3361 ðertype_filter_ptr->filter_info,
3362 sizeof(struct rte_eth_ethertype_filter));
3363 ret = ixgbe_add_del_ethertype_filter(dev,
3364 ðertype_filter, FALSE);
3366 TAILQ_REMOVE(&filter_ethertype_list,
3367 ethertype_filter_ptr, entries);
3368 rte_free(ethertype_filter_ptr);
3371 case RTE_ETH_FILTER_SYN:
3372 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3374 rte_memcpy(&syn_filter,
3375 &syn_filter_ptr->filter_info,
3376 sizeof(struct rte_eth_syn_filter));
3377 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3379 TAILQ_REMOVE(&filter_syn_list,
3380 syn_filter_ptr, entries);
3381 rte_free(syn_filter_ptr);
3384 case RTE_ETH_FILTER_FDIR:
3385 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3386 rte_memcpy(&fdir_rule,
3387 &fdir_rule_ptr->filter_info,
3388 sizeof(struct ixgbe_fdir_rule));
3389 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3391 TAILQ_REMOVE(&filter_fdir_list,
3392 fdir_rule_ptr, entries);
3393 rte_free(fdir_rule_ptr);
3394 if (TAILQ_EMPTY(&filter_fdir_list))
3395 fdir_info->mask_added = false;
3398 case RTE_ETH_FILTER_L2_TUNNEL:
3399 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3401 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3402 sizeof(struct ixgbe_l2_tunnel_conf));
3403 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3405 TAILQ_REMOVE(&filter_l2_tunnel_list,
3406 l2_tn_filter_ptr, entries);
3407 rte_free(l2_tn_filter_ptr);
3410 case RTE_ETH_FILTER_HASH:
3411 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3413 ret = ixgbe_config_rss_filter(dev,
3414 &rss_filter_ptr->filter_info, FALSE);
3416 TAILQ_REMOVE(&filter_rss_list,
3417 rss_filter_ptr, entries);
3418 rte_free(rss_filter_ptr);
3422 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3429 rte_flow_error_set(error, EINVAL,
3430 RTE_FLOW_ERROR_TYPE_HANDLE,
3431 NULL, "Failed to destroy flow");
3435 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3436 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3437 TAILQ_REMOVE(&ixgbe_flow_list,
3438 ixgbe_flow_mem_ptr, entries);
3439 rte_free(ixgbe_flow_mem_ptr);
3448 /* Destroy all flow rules associated with a port on ixgbe. */
3450 ixgbe_flow_flush(struct rte_eth_dev *dev,
3451 struct rte_flow_error *error)
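	/*
	 * Clear every filter type from the hardware first, then empty the
	 * software filter lists kept by this file.
	 */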
3455 ixgbe_clear_all_ntuple_filter(dev);
3456 ixgbe_clear_all_ethertype_filter(dev);
3457 ixgbe_clear_syn_filter(dev);
3459 ret = ixgbe_clear_all_fdir_filter(dev);
3461 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3462 NULL, "Failed to flush rule");
3466 ret = ixgbe_clear_all_l2_tn_filter(dev);
3468 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3469 NULL, "Failed to flush rule");
3473 ixgbe_clear_rss_filter(dev);
3475 ixgbe_filterlist_flush();
3480 const struct rte_flow_ops ixgbe_flow_ops = {
3481 .validate = ixgbe_flow_validate,
3482 .create = ixgbe_flow_create,
3483 .destroy = ixgbe_flow_destroy,
3484 .flush = ixgbe_flow_flush,