1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2016 Intel Corporation
13 #include <netinet/in.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_cycles.h>
18 #include <rte_interrupts.h>
20 #include <rte_debug.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
26 #include <rte_alarm.h>
27 #include <rte_ether.h>
28 #include <rte_ethdev_driver.h>
29 #include <rte_malloc.h>
30 #include <rte_random.h>
32 #include <rte_hash_crc.h>
34 #include <rte_flow_driver.h>
36 #include "ixgbe_logs.h"
37 #include "base/ixgbe_api.h"
38 #include "base/ixgbe_vf.h"
39 #include "base/ixgbe_common.h"
40 #include "ixgbe_ethdev.h"
41 #include "ixgbe_bypass.h"
42 #include "ixgbe_rxtx.h"
43 #include "base/ixgbe_type.h"
44 #include "base/ixgbe_phy.h"
45 #include "rte_pmd_ixgbe.h"
48 #define IXGBE_MIN_N_TUPLE_PRIO 1
49 #define IXGBE_MAX_N_TUPLE_PRIO 7
50 #define IXGBE_MAX_FLX_SOURCE_OFF 62
52 /* ntuple filter list structure */
53 struct ixgbe_ntuple_filter_ele {
54 TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
55 struct rte_eth_ntuple_filter filter_info;
57 /* ethertype filter list structure */
58 struct ixgbe_ethertype_filter_ele {
59 TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
60 struct rte_eth_ethertype_filter filter_info;
62 /* syn filter list structure */
63 struct ixgbe_eth_syn_filter_ele {
64 TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
65 struct rte_eth_syn_filter filter_info;
67 /* fdir filter list structure */
68 struct ixgbe_fdir_rule_ele {
69 TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
70 struct ixgbe_fdir_rule filter_info;
72 /* l2_tunnel filter list structure */
73 struct ixgbe_eth_l2_tunnel_conf_ele {
74 TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
75 struct rte_eth_l2_tunnel_conf filter_info;
77 /* rss filter list structure */
78 struct ixgbe_rss_conf_ele {
79 TAILQ_ENTRY(ixgbe_rss_conf_ele) entries;
80 struct ixgbe_rte_flow_rss_conf filter_info;
82 /* ixgbe_flow memory list structure */
83 struct ixgbe_flow_mem {
84 TAILQ_ENTRY(ixgbe_flow_mem) entries;
85 struct rte_flow *flow;
88 TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
89 TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
90 TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
91 TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
92 TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
93 TAILQ_HEAD(ixgbe_rss_filter_list, ixgbe_rss_conf_ele);
94 TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
96 static struct ixgbe_ntuple_filter_list filter_ntuple_list;
97 static struct ixgbe_ethertype_filter_list filter_ethertype_list;
98 static struct ixgbe_syn_filter_list filter_syn_list;
99 static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
100 static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
101 static struct ixgbe_rss_filter_list filter_rss_list;
102 static struct ixgbe_flow_mem_list ixgbe_flow_list;
105 * An endless loop cannot happen under the assumptions below:
106 * 1. there is at least one non-void item (END).
107 * 2. cur is before END.
110 const struct rte_flow_item *next_no_void_pattern(
111 const struct rte_flow_item pattern[],
112 const struct rte_flow_item *cur)
114 const struct rte_flow_item *next =
115 cur ? cur + 1 : &pattern[0];
117 if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
124 const struct rte_flow_action *next_no_void_action(
125 const struct rte_flow_action actions[],
126 const struct rte_flow_action *cur)
128 const struct rte_flow_action *next =
129 cur ? cur + 1 : &actions[0];
131 if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
138 * Please be aware of an assumption shared by all the parsers:
139 * rte_flow_item uses big endian, while rte_flow_attr and
140 * rte_flow_action use CPU order.
141 * Because the pattern is used to describe packets,
142 * the packets normally use network order.
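/*
 * Illustrative sketch (not part of the driver): how the byte-order rule
 * above looks from an application's point of view.  The helper name, the
 * addresses and the priority value are placeholders chosen only for this
 * example.
 */
static void __rte_unused
ixgbe_flow_doc_byte_order_example(void)
{
	struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = 1,	/* rte_flow_attr fields use CPU order */
	};
	struct rte_flow_item_ipv4 ipv4_spec = {
		/* rte_flow_item fields use network (big endian) order */
		.hdr.src_addr = RTE_BE32(0xC0A80114),	/* 192.168.1.20 */
		.hdr.dst_addr = RTE_BE32(0xC0A70332),	/* 192.167.3.50 */
	};

	RTE_SET_USED(attr);
	RTE_SET_USED(ipv4_spec);
}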
146 * Parse the rule to see if it is an n-tuple rule,
147 * and get the n-tuple filter info along the way.
149 * The first not void item can be ETH or IPV4.
150 * The second not void item must be IPV4 if the first one is ETH.
151 * The third not void item must be UDP or TCP.
152 * The next not void item must be END.
154 * The first not void action should be QUEUE.
155 * The next not void action should be END.
159 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
160 * dst_addr 192.167.3.50 0xFFFFFFFF
161 * next_proto_id 17 0xFF
162 * UDP/TCP/ src_port 80 0xFFFF
163 * SCTP dst_port 80 0xFFFF
165 * other members in mask and spec should be set to 0x00.
166 * item->last should be NULL.
168 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY.
172 cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
173 const struct rte_flow_item pattern[],
174 const struct rte_flow_action actions[],
175 struct rte_eth_ntuple_filter *filter,
176 struct rte_flow_error *error)
178 const struct rte_flow_item *item;
179 const struct rte_flow_action *act;
180 const struct rte_flow_item_ipv4 *ipv4_spec;
181 const struct rte_flow_item_ipv4 *ipv4_mask;
182 const struct rte_flow_item_tcp *tcp_spec;
183 const struct rte_flow_item_tcp *tcp_mask;
184 const struct rte_flow_item_udp *udp_spec;
185 const struct rte_flow_item_udp *udp_mask;
186 const struct rte_flow_item_sctp *sctp_spec;
187 const struct rte_flow_item_sctp *sctp_mask;
188 const struct rte_flow_item_eth *eth_spec;
189 const struct rte_flow_item_eth *eth_mask;
190 const struct rte_flow_item_vlan *vlan_spec;
191 const struct rte_flow_item_vlan *vlan_mask;
192 struct rte_flow_item_eth eth_null;
193 struct rte_flow_item_vlan vlan_null;
196 rte_flow_error_set(error,
197 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
198 NULL, "NULL pattern.");
203 rte_flow_error_set(error, EINVAL,
204 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
205 NULL, "NULL action.");
209 rte_flow_error_set(error, EINVAL,
210 RTE_FLOW_ERROR_TYPE_ATTR,
211 NULL, "NULL attribute.");
215 memset(ð_null, 0, sizeof(struct rte_flow_item_eth));
216 memset(&vlan_null, 0, sizeof(struct rte_flow_item_vlan));
218 #ifdef RTE_LIBRTE_SECURITY
220 * Special case for flow action type RTE_FLOW_ACTION_TYPE_SECURITY
222 act = next_no_void_action(actions, NULL);
223 if (act->type == RTE_FLOW_ACTION_TYPE_SECURITY) {
224 const void *conf = act->conf;
225 /* check if the next not void item is END */
226 act = next_no_void_action(actions, act);
227 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
228 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
229 rte_flow_error_set(error, EINVAL,
230 RTE_FLOW_ERROR_TYPE_ACTION,
231 act, "Not supported action.");
235 /* get the IP pattern*/
236 item = next_no_void_pattern(pattern, NULL);
237 while (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
238 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
240 item->type == RTE_FLOW_ITEM_TYPE_END) {
241 rte_flow_error_set(error, EINVAL,
242 RTE_FLOW_ERROR_TYPE_ITEM,
243 item, "IP pattern missing.");
246 item = next_no_void_pattern(pattern, item);
249 filter->proto = IPPROTO_ESP;
250 return ixgbe_crypto_add_ingress_sa_from_flow(conf, item->spec,
251 item->type == RTE_FLOW_ITEM_TYPE_IPV6);
255 /* the first not void item can be MAC or IPv4 */
256 item = next_no_void_pattern(pattern, NULL);
258 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
259 item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
260 rte_flow_error_set(error, EINVAL,
261 RTE_FLOW_ERROR_TYPE_ITEM,
262 item, "Not supported by ntuple filter");
266 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
267 eth_spec = item->spec;
268 eth_mask = item->mask;
269 /*Not supported last point for range*/
271 rte_flow_error_set(error,
273 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
274 item, "Not supported last point for range");
278 /* if the first item is MAC, the content should be NULL */
279 if ((item->spec || item->mask) &&
280 (memcmp(eth_spec, ð_null,
281 sizeof(struct rte_flow_item_eth)) ||
282 memcmp(eth_mask, ð_null,
283 sizeof(struct rte_flow_item_eth)))) {
284 rte_flow_error_set(error, EINVAL,
285 RTE_FLOW_ERROR_TYPE_ITEM,
286 item, "Not supported by ntuple filter");
289 /* check if the next not void item is IPv4 or Vlan */
290 item = next_no_void_pattern(pattern, item);
291 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
292 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
293 rte_flow_error_set(error,
294 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
295 item, "Not supported by ntuple filter");
300 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
301 vlan_spec = item->spec;
302 vlan_mask = item->mask;
303 /*Not supported last point for range*/
305 rte_flow_error_set(error,
307 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
308 item, "Not supported last point for range");
311 /* the content should be NULL */
312 if ((item->spec || item->mask) &&
313 (memcmp(vlan_spec, &vlan_null,
314 sizeof(struct rte_flow_item_vlan)) ||
315 memcmp(vlan_mask, &vlan_null,
316 sizeof(struct rte_flow_item_vlan)))) {
318 rte_flow_error_set(error, EINVAL,
319 RTE_FLOW_ERROR_TYPE_ITEM,
320 item, "Not supported by ntuple filter");
323 /* check if the next not void item is IPv4 */
324 item = next_no_void_pattern(pattern, item);
325 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
326 rte_flow_error_set(error,
327 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
328 item, "Not supported by ntuple filter");
334 /* get the IPv4 info */
335 if (!item->spec || !item->mask) {
336 rte_flow_error_set(error, EINVAL,
337 RTE_FLOW_ERROR_TYPE_ITEM,
338 item, "Invalid ntuple mask");
341 /*Not supported last point for range*/
343 rte_flow_error_set(error, EINVAL,
344 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
345 item, "Not supported last point for range");
349 ipv4_mask = item->mask;
351 * Only support src & dst addresses, protocol,
352 * others should be masked.
354 if (ipv4_mask->hdr.version_ihl ||
355 ipv4_mask->hdr.type_of_service ||
356 ipv4_mask->hdr.total_length ||
357 ipv4_mask->hdr.packet_id ||
358 ipv4_mask->hdr.fragment_offset ||
359 ipv4_mask->hdr.time_to_live ||
360 ipv4_mask->hdr.hdr_checksum) {
361 rte_flow_error_set(error,
362 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
363 item, "Not supported by ntuple filter");
366 if ((ipv4_mask->hdr.src_addr != 0 &&
367 ipv4_mask->hdr.src_addr != UINT32_MAX) ||
368 (ipv4_mask->hdr.dst_addr != 0 &&
369 ipv4_mask->hdr.dst_addr != UINT32_MAX) ||
370 (ipv4_mask->hdr.next_proto_id != UINT8_MAX &&
371 ipv4_mask->hdr.next_proto_id != 0)) {
372 rte_flow_error_set(error,
373 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
374 item, "Not supported by ntuple filter");
378 filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
379 filter->src_ip_mask = ipv4_mask->hdr.src_addr;
380 filter->proto_mask = ipv4_mask->hdr.next_proto_id;
382 ipv4_spec = item->spec;
383 filter->dst_ip = ipv4_spec->hdr.dst_addr;
384 filter->src_ip = ipv4_spec->hdr.src_addr;
385 filter->proto = ipv4_spec->hdr.next_proto_id;
388 /* check if the next not void item is TCP or UDP */
389 item = next_no_void_pattern(pattern, item);
390 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
391 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
392 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
393 item->type != RTE_FLOW_ITEM_TYPE_END) {
394 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
395 rte_flow_error_set(error, EINVAL,
396 RTE_FLOW_ERROR_TYPE_ITEM,
397 item, "Not supported by ntuple filter");
401 if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
402 (!item->spec && !item->mask)) {
406 /* get the TCP/UDP/SCTP info */
407 if (item->type != RTE_FLOW_ITEM_TYPE_END &&
408 (!item->spec || !item->mask)) {
409 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
410 rte_flow_error_set(error, EINVAL,
411 RTE_FLOW_ERROR_TYPE_ITEM,
412 item, "Invalid ntuple mask");
416 /*Not supported last point for range*/
418 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
419 rte_flow_error_set(error, EINVAL,
420 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
421 item, "Not supported last point for range");
426 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
427 tcp_mask = item->mask;
430 * Only support src & dst ports, tcp flags,
431 * others should be masked.
433 if (tcp_mask->hdr.sent_seq ||
434 tcp_mask->hdr.recv_ack ||
435 tcp_mask->hdr.data_off ||
436 tcp_mask->hdr.rx_win ||
437 tcp_mask->hdr.cksum ||
438 tcp_mask->hdr.tcp_urp) {
440 sizeof(struct rte_eth_ntuple_filter));
441 rte_flow_error_set(error, EINVAL,
442 RTE_FLOW_ERROR_TYPE_ITEM,
443 item, "Not supported by ntuple filter");
446 if ((tcp_mask->hdr.src_port != 0 &&
447 tcp_mask->hdr.src_port != UINT16_MAX) ||
448 (tcp_mask->hdr.dst_port != 0 &&
449 tcp_mask->hdr.dst_port != UINT16_MAX)) {
450 rte_flow_error_set(error,
451 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
452 item, "Not supported by ntuple filter");
456 filter->dst_port_mask = tcp_mask->hdr.dst_port;
457 filter->src_port_mask = tcp_mask->hdr.src_port;
458 if (tcp_mask->hdr.tcp_flags == 0xFF) {
459 filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
460 } else if (!tcp_mask->hdr.tcp_flags) {
461 filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
463 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
464 rte_flow_error_set(error, EINVAL,
465 RTE_FLOW_ERROR_TYPE_ITEM,
466 item, "Not supported by ntuple filter");
470 tcp_spec = item->spec;
471 filter->dst_port = tcp_spec->hdr.dst_port;
472 filter->src_port = tcp_spec->hdr.src_port;
473 filter->tcp_flags = tcp_spec->hdr.tcp_flags;
474 } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
475 udp_mask = item->mask;
478 * Only support src & dst ports,
479 * others should be masked.
481 if (udp_mask->hdr.dgram_len ||
482 udp_mask->hdr.dgram_cksum) {
484 sizeof(struct rte_eth_ntuple_filter));
485 rte_flow_error_set(error, EINVAL,
486 RTE_FLOW_ERROR_TYPE_ITEM,
487 item, "Not supported by ntuple filter");
490 if ((udp_mask->hdr.src_port != 0 &&
491 udp_mask->hdr.src_port != UINT16_MAX) ||
492 (udp_mask->hdr.dst_port != 0 &&
493 udp_mask->hdr.dst_port != UINT16_MAX)) {
494 rte_flow_error_set(error,
495 EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
496 item, "Not supported by ntuple filter");
500 filter->dst_port_mask = udp_mask->hdr.dst_port;
501 filter->src_port_mask = udp_mask->hdr.src_port;
503 udp_spec = item->spec;
504 filter->dst_port = udp_spec->hdr.dst_port;
505 filter->src_port = udp_spec->hdr.src_port;
506 } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
507 sctp_mask = item->mask;
510 * Only support src & dst ports,
511 * others should be masked.
513 if (sctp_mask->hdr.tag ||
514 sctp_mask->hdr.cksum) {
516 sizeof(struct rte_eth_ntuple_filter));
517 rte_flow_error_set(error, EINVAL,
518 RTE_FLOW_ERROR_TYPE_ITEM,
519 item, "Not supported by ntuple filter");
523 filter->dst_port_mask = sctp_mask->hdr.dst_port;
524 filter->src_port_mask = sctp_mask->hdr.src_port;
526 sctp_spec = item->spec;
527 filter->dst_port = sctp_spec->hdr.dst_port;
528 filter->src_port = sctp_spec->hdr.src_port;
533 /* check if the next not void item is END */
534 item = next_no_void_pattern(pattern, item);
535 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
536 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
537 rte_flow_error_set(error, EINVAL,
538 RTE_FLOW_ERROR_TYPE_ITEM,
539 item, "Not supported by ntuple filter");
546 * n-tuple only supports forwarding,
547 * check if the first not void action is QUEUE.
549 act = next_no_void_action(actions, NULL);
550 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
551 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
552 rte_flow_error_set(error, EINVAL,
553 RTE_FLOW_ERROR_TYPE_ACTION,
554 item, "Not supported action.");
558 ((const struct rte_flow_action_queue *)act->conf)->index;
560 /* check if the next not void item is END */
561 act = next_no_void_action(actions, act);
562 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
563 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
564 rte_flow_error_set(error, EINVAL,
565 RTE_FLOW_ERROR_TYPE_ACTION,
566 act, "Not supported action.");
571 /* must be input direction */
572 if (!attr->ingress) {
573 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
574 rte_flow_error_set(error, EINVAL,
575 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
576 attr, "Only support ingress.");
582 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
583 rte_flow_error_set(error, EINVAL,
584 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
585 attr, "Not support egress.");
590 if (attr->transfer) {
591 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
592 rte_flow_error_set(error, EINVAL,
593 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
594 attr, "No support for transfer.");
598 if (attr->priority > 0xFFFF) {
599 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
600 rte_flow_error_set(error, EINVAL,
601 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
602 attr, "Error priority.");
605 filter->priority = (uint16_t)attr->priority;
606 if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
607 attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
608 filter->priority = 1;
613 /* a specific function for ixgbe because the flags are specific */
615 ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
616 const struct rte_flow_attr *attr,
617 const struct rte_flow_item pattern[],
618 const struct rte_flow_action actions[],
619 struct rte_eth_ntuple_filter *filter,
620 struct rte_flow_error *error)
623 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
625 MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);
627 ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
632 #ifdef RTE_LIBRTE_SECURITY
633 /* An ESP flow is not really a flow. */
634 if (filter->proto == IPPROTO_ESP)
638 /* Ixgbe doesn't support tcp flags. */
639 if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
640 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
641 rte_flow_error_set(error, EINVAL,
642 RTE_FLOW_ERROR_TYPE_ITEM,
643 NULL, "Not supported by ntuple filter");
647 /* Ixgbe doesn't support many priorities. */
648 if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
649 filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
650 memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
651 rte_flow_error_set(error, EINVAL,
652 RTE_FLOW_ERROR_TYPE_ITEM,
653 NULL, "Priority not supported by ntuple filter");
657 if (filter->queue >= dev->data->nb_rx_queues)
660 /* fixed value for ixgbe */
661 filter->flags = RTE_5TUPLE_FLAGS;
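/*
 * Illustrative sketch (not part of the driver): one way an application
 * could express the n-tuple rule documented above through the public
 * rte_flow API.  The helper name, port id, addresses, ports and queue
 * index are placeholders; whether the rule is accepted still depends on
 * every check in cons_parse_ntuple_filter()/ixgbe_parse_ntuple_filter().
 * rte_flow_create() would take the same arguments to install the rule.
 */
static int __rte_unused
ixgbe_flow_doc_ntuple_example(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = {
		.ingress = 1,
		.priority = IXGBE_MIN_N_TUPLE_PRIO,
	};
	const struct rte_flow_item_ipv4 ipv4_spec = {
		.hdr.src_addr = RTE_BE32(0xC0A80114),	/* 192.168.1.20 */
		.hdr.dst_addr = RTE_BE32(0xC0A70332),	/* 192.167.3.50 */
		.hdr.next_proto_id = IPPROTO_UDP,
	};
	const struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr.src_addr = RTE_BE32(0xFFFFFFFF),
		.hdr.dst_addr = RTE_BE32(0xFFFFFFFF),
		.hdr.next_proto_id = 0xFF,
	};
	const struct rte_flow_item_udp udp_spec = {
		.hdr.src_port = RTE_BE16(80),
		.hdr.dst_port = RTE_BE16(80),
	};
	const struct rte_flow_item_udp udp_mask = {
		.hdr.src_port = RTE_BE16(0xFFFF),
		.hdr.dst_port = RTE_BE16(0xFFFF),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask left NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ipv4_spec, .mask = &ipv4_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP,
		  .spec = &udp_spec, .mask = &udp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}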
666 * Parse the rule to see if it is an ethertype rule,
667 * and get the ethertype filter info along the way.
669 * The first not void item can be ETH.
670 * The next not void item must be END.
672 * The first not void action should be QUEUE.
673 * The next not void action should be END.
676 * ETH type 0x0807 0xFFFF
678 * other members in mask and spec should be set to 0x00.
679 * item->last should be NULL.
682 cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
683 const struct rte_flow_item *pattern,
684 const struct rte_flow_action *actions,
685 struct rte_eth_ethertype_filter *filter,
686 struct rte_flow_error *error)
688 const struct rte_flow_item *item;
689 const struct rte_flow_action *act;
690 const struct rte_flow_item_eth *eth_spec;
691 const struct rte_flow_item_eth *eth_mask;
692 const struct rte_flow_action_queue *act_q;
695 rte_flow_error_set(error, EINVAL,
696 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
697 NULL, "NULL pattern.");
702 rte_flow_error_set(error, EINVAL,
703 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
704 NULL, "NULL action.");
709 rte_flow_error_set(error, EINVAL,
710 RTE_FLOW_ERROR_TYPE_ATTR,
711 NULL, "NULL attribute.");
715 item = next_no_void_pattern(pattern, NULL);
716 /* The first non-void item should be MAC. */
717 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
718 rte_flow_error_set(error, EINVAL,
719 RTE_FLOW_ERROR_TYPE_ITEM,
720 item, "Not supported by ethertype filter");
724 /*Not supported last point for range*/
726 rte_flow_error_set(error, EINVAL,
727 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
728 item, "Not supported last point for range");
732 /* Get the MAC info. */
733 if (!item->spec || !item->mask) {
734 rte_flow_error_set(error, EINVAL,
735 RTE_FLOW_ERROR_TYPE_ITEM,
736 item, "Not supported by ethertype filter");
740 eth_spec = item->spec;
741 eth_mask = item->mask;
743 /* Mask bits of the source MAC address must be full of 0.
744 * Mask bits of the destination MAC address must be full of 1 or full of 0.
747 if (!rte_is_zero_ether_addr(ð_mask->src) ||
748 (!rte_is_zero_ether_addr(ð_mask->dst) &&
749 !rte_is_broadcast_ether_addr(ð_mask->dst))) {
750 rte_flow_error_set(error, EINVAL,
751 RTE_FLOW_ERROR_TYPE_ITEM,
752 item, "Invalid ether address mask");
756 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
757 rte_flow_error_set(error, EINVAL,
758 RTE_FLOW_ERROR_TYPE_ITEM,
759 item, "Invalid ethertype mask");
763 /* If mask bits of destination MAC address
764 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
766 if (rte_is_broadcast_ether_addr(ð_mask->dst)) {
767 filter->mac_addr = eth_spec->dst;
768 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
770 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
772 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
774 /* Check if the next non-void item is END. */
775 item = next_no_void_pattern(pattern, item);
776 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
777 rte_flow_error_set(error, EINVAL,
778 RTE_FLOW_ERROR_TYPE_ITEM,
779 item, "Not supported by ethertype filter.");
785 act = next_no_void_action(actions, NULL);
786 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
787 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
788 rte_flow_error_set(error, EINVAL,
789 RTE_FLOW_ERROR_TYPE_ACTION,
790 act, "Not supported action.");
794 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
795 act_q = (const struct rte_flow_action_queue *)act->conf;
796 filter->queue = act_q->index;
798 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
801 /* Check if the next non-void item is END */
802 act = next_no_void_action(actions, act);
803 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
804 rte_flow_error_set(error, EINVAL,
805 RTE_FLOW_ERROR_TYPE_ACTION,
806 act, "Not supported action.");
811 /* Must be input direction */
812 if (!attr->ingress) {
813 rte_flow_error_set(error, EINVAL,
814 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
815 attr, "Only support ingress.");
821 rte_flow_error_set(error, EINVAL,
822 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
823 attr, "Not support egress.");
828 if (attr->transfer) {
829 rte_flow_error_set(error, EINVAL,
830 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
831 attr, "No support for transfer.");
836 if (attr->priority) {
837 rte_flow_error_set(error, EINVAL,
838 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
839 attr, "Not support priority.");
845 rte_flow_error_set(error, EINVAL,
846 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
847 attr, "Not support group.");
855 ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
856 const struct rte_flow_attr *attr,
857 const struct rte_flow_item pattern[],
858 const struct rte_flow_action actions[],
859 struct rte_eth_ethertype_filter *filter,
860 struct rte_flow_error *error)
863 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
865 MAC_TYPE_FILTER_SUP(hw->mac.type);
867 ret = cons_parse_ethertype_filter(attr, pattern,
868 actions, filter, error);
873 /* Ixgbe doesn't support MAC address. */
874 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
875 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
876 rte_flow_error_set(error, EINVAL,
877 RTE_FLOW_ERROR_TYPE_ITEM,
878 NULL, "Not supported by ethertype filter");
882 if (filter->queue >= dev->data->nb_rx_queues) {
883 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
884 rte_flow_error_set(error, EINVAL,
885 RTE_FLOW_ERROR_TYPE_ITEM,
886 NULL, "queue index much too big");
890 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
891 filter->ether_type == RTE_ETHER_TYPE_IPV6) {
892 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
893 rte_flow_error_set(error, EINVAL,
894 RTE_FLOW_ERROR_TYPE_ITEM,
895 NULL, "IPv4/IPv6 not supported by ethertype filter");
899 if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
900 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
901 rte_flow_error_set(error, EINVAL,
902 RTE_FLOW_ERROR_TYPE_ITEM,
903 NULL, "mac compare is unsupported");
907 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
908 memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
909 rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_ITEM,
911 NULL, "drop option is unsupported");
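/*
 * Illustrative sketch (not part of the driver): an ethertype rule of the
 * shape documented above, matching EtherType 0x0807 (as in the example
 * table) and steering to queue 0.  The helper name, port id and queue
 * index are placeholders; the rule must still satisfy every check in
 * cons_parse_ethertype_filter()/ixgbe_parse_ethertype_filter().
 */
static int __rte_unused
ixgbe_flow_doc_ethertype_example(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_eth eth_spec = {
		.type = RTE_BE16(0x0807),
	};
	const struct rte_flow_item_eth eth_mask = {
		.type = RTE_BE16(0xFFFF),	/* MAC address masks left all-zero */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
		  .spec = &eth_spec, .mask = &eth_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}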
919 * Parse the rule to see if it is a TCP SYN rule,
920 * and get the TCP SYN filter info along the way.
922 * The first not void item must be ETH.
923 * The second not void item must be IPV4 or IPV6.
924 * The third not void item must be TCP.
925 * The next not void item must be END.
927 * The first not void action should be QUEUE.
928 * The next not void action should be END.
932 * IPV4/IPV6 NULL NULL
933 * TCP tcp_flags 0x02 0xFF
935 * other members in mask and spec should be set to 0x00.
936 * item->last should be NULL.
939 cons_parse_syn_filter(const struct rte_flow_attr *attr,
940 const struct rte_flow_item pattern[],
941 const struct rte_flow_action actions[],
942 struct rte_eth_syn_filter *filter,
943 struct rte_flow_error *error)
945 const struct rte_flow_item *item;
946 const struct rte_flow_action *act;
947 const struct rte_flow_item_tcp *tcp_spec;
948 const struct rte_flow_item_tcp *tcp_mask;
949 const struct rte_flow_action_queue *act_q;
952 rte_flow_error_set(error, EINVAL,
953 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
954 NULL, "NULL pattern.");
959 rte_flow_error_set(error, EINVAL,
960 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
961 NULL, "NULL action.");
966 rte_flow_error_set(error, EINVAL,
967 RTE_FLOW_ERROR_TYPE_ATTR,
968 NULL, "NULL attribute.");
973 /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
974 item = next_no_void_pattern(pattern, NULL);
975 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
976 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
977 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
978 item->type != RTE_FLOW_ITEM_TYPE_TCP) {
979 rte_flow_error_set(error, EINVAL,
980 RTE_FLOW_ERROR_TYPE_ITEM,
981 item, "Not supported by syn filter");
984 /*Not supported last point for range*/
986 rte_flow_error_set(error, EINVAL,
987 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
988 item, "Not supported last point for range");
993 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
994 /* if the item is MAC, the content should be NULL */
995 if (item->spec || item->mask) {
996 rte_flow_error_set(error, EINVAL,
997 RTE_FLOW_ERROR_TYPE_ITEM,
998 item, "Invalid SYN address mask");
1002 /* check if the next not void item is IPv4 or IPv6 */
1003 item = next_no_void_pattern(pattern, item);
1004 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1005 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
1006 rte_flow_error_set(error, EINVAL,
1007 RTE_FLOW_ERROR_TYPE_ITEM,
1008 item, "Not supported by syn filter");
1014 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
1015 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1016 /* if the item is IP, the content should be NULL */
1017 if (item->spec || item->mask) {
1018 rte_flow_error_set(error, EINVAL,
1019 RTE_FLOW_ERROR_TYPE_ITEM,
1020 item, "Invalid SYN mask");
1024 /* check if the next not void item is TCP */
1025 item = next_no_void_pattern(pattern, item);
1026 if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
1027 rte_flow_error_set(error, EINVAL,
1028 RTE_FLOW_ERROR_TYPE_ITEM,
1029 item, "Not supported by syn filter");
1034 /* Get the TCP info. Only support SYN. */
1035 if (!item->spec || !item->mask) {
1036 rte_flow_error_set(error, EINVAL,
1037 RTE_FLOW_ERROR_TYPE_ITEM,
1038 item, "Invalid SYN mask");
1041 /*Not supported last point for range*/
1043 rte_flow_error_set(error, EINVAL,
1044 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1045 item, "Not supported last point for range");
1049 tcp_spec = item->spec;
1050 tcp_mask = item->mask;
1051 if (!(tcp_spec->hdr.tcp_flags & RTE_TCP_SYN_FLAG) ||
1052 tcp_mask->hdr.src_port ||
1053 tcp_mask->hdr.dst_port ||
1054 tcp_mask->hdr.sent_seq ||
1055 tcp_mask->hdr.recv_ack ||
1056 tcp_mask->hdr.data_off ||
1057 tcp_mask->hdr.tcp_flags != RTE_TCP_SYN_FLAG ||
1058 tcp_mask->hdr.rx_win ||
1059 tcp_mask->hdr.cksum ||
1060 tcp_mask->hdr.tcp_urp) {
1061 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1062 rte_flow_error_set(error, EINVAL,
1063 RTE_FLOW_ERROR_TYPE_ITEM,
1064 item, "Not supported by syn filter");
1068 /* check if the next not void item is END */
1069 item = next_no_void_pattern(pattern, item);
1070 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1071 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1072 rte_flow_error_set(error, EINVAL,
1073 RTE_FLOW_ERROR_TYPE_ITEM,
1074 item, "Not supported by syn filter");
1078 /* check if the first not void action is QUEUE. */
1079 act = next_no_void_action(actions, NULL);
1080 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
1081 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1082 rte_flow_error_set(error, EINVAL,
1083 RTE_FLOW_ERROR_TYPE_ACTION,
1084 act, "Not supported action.");
1088 act_q = (const struct rte_flow_action_queue *)act->conf;
1089 filter->queue = act_q->index;
1090 if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
1091 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1092 rte_flow_error_set(error, EINVAL,
1093 RTE_FLOW_ERROR_TYPE_ACTION,
1094 act, "Not supported action.");
1098 /* check if the next not void item is END */
1099 act = next_no_void_action(actions, act);
1100 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1101 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1102 rte_flow_error_set(error, EINVAL,
1103 RTE_FLOW_ERROR_TYPE_ACTION,
1104 act, "Not supported action.");
1109 /* must be input direction */
1110 if (!attr->ingress) {
1111 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1112 rte_flow_error_set(error, EINVAL,
1113 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1114 attr, "Only support ingress.");
1120 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1121 rte_flow_error_set(error, EINVAL,
1122 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1123 attr, "Not support egress.");
1128 if (attr->transfer) {
1129 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1130 rte_flow_error_set(error, EINVAL,
1131 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1132 attr, "No support for transfer.");
1136 /* Support 2 priorities, the lowest or highest. */
1137 if (!attr->priority) {
1138 filter->hig_pri = 0;
1139 } else if (attr->priority == (uint32_t)~0U) {
1140 filter->hig_pri = 1;
1142 memset(filter, 0, sizeof(struct rte_eth_syn_filter));
1143 rte_flow_error_set(error, EINVAL,
1144 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1145 attr, "Not support priority.");
1153 ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
1154 const struct rte_flow_attr *attr,
1155 const struct rte_flow_item pattern[],
1156 const struct rte_flow_action actions[],
1157 struct rte_eth_syn_filter *filter,
1158 struct rte_flow_error *error)
1161 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1163 MAC_TYPE_FILTER_SUP(hw->mac.type);
1165 ret = cons_parse_syn_filter(attr, pattern,
1166 actions, filter, error);
1168 if (filter->queue >= dev->data->nb_rx_queues)
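/*
 * Illustrative sketch (not part of the driver): a TCP SYN rule of the shape
 * documented above, mirroring the ETH / IPV4 / TCP example table.  The
 * helper name, port id and queue index are placeholders; acceptance still
 * depends on every check in cons_parse_syn_filter()/ixgbe_parse_syn_filter().
 */
static int __rte_unused
ixgbe_flow_doc_syn_example(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };	/* priority 0: low */
	const struct rte_flow_item_tcp tcp_spec = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,
	};
	const struct rte_flow_item_tcp tcp_mask = {
		.hdr.tcp_flags = RTE_TCP_SYN_FLAG,	/* only the SYN bit is compared */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },	/* spec/mask left NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },	/* spec/mask left NULL */
		{ .type = RTE_FLOW_ITEM_TYPE_TCP,
		  .spec = &tcp_spec, .mask = &tcp_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}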
1178 * Parse the rule to see if it is an L2 tunnel rule,
1179 * and get the L2 tunnel filter info along the way.
1180 * Only E-tag is supported now.
1182 * The first not void item can be E_TAG.
1183 * The next not void item must be END.
1185 * The first not void action should be VF or PF.
1186 * The next not void action should be END.
1190 *		e_cid_base	0x309	0xFFF
1192 * other members in mask and spec should be set to 0x00.
1193 * item->last should be NULL.
1196 cons_parse_l2_tn_filter(struct rte_eth_dev *dev,
1197 const struct rte_flow_attr *attr,
1198 const struct rte_flow_item pattern[],
1199 const struct rte_flow_action actions[],
1200 struct rte_eth_l2_tunnel_conf *filter,
1201 struct rte_flow_error *error)
1203 const struct rte_flow_item *item;
1204 const struct rte_flow_item_e_tag *e_tag_spec;
1205 const struct rte_flow_item_e_tag *e_tag_mask;
1206 const struct rte_flow_action *act;
1207 const struct rte_flow_action_vf *act_vf;
1208 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1211 rte_flow_error_set(error, EINVAL,
1212 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1213 NULL, "NULL pattern.");
1218 rte_flow_error_set(error, EINVAL,
1219 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1220 NULL, "NULL action.");
1225 rte_flow_error_set(error, EINVAL,
1226 RTE_FLOW_ERROR_TYPE_ATTR,
1227 NULL, "NULL attribute.");
1231 /* The first not void item should be e-tag. */
1232 item = next_no_void_pattern(pattern, NULL);
1233 if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
1234 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1235 rte_flow_error_set(error, EINVAL,
1236 RTE_FLOW_ERROR_TYPE_ITEM,
1237 item, "Not supported by L2 tunnel filter");
1241 if (!item->spec || !item->mask) {
1242 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1243 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
1244 item, "Not supported by L2 tunnel filter");
1248 /*Not supported last point for range*/
1250 rte_flow_error_set(error, EINVAL,
1251 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1252 item, "Not supported last point for range");
1256 e_tag_spec = item->spec;
1257 e_tag_mask = item->mask;
1259 /* Only care about GRP and E cid base. */
1260 if (e_tag_mask->epcp_edei_in_ecid_b ||
1261 e_tag_mask->in_ecid_e ||
1262 e_tag_mask->ecid_e ||
1263 e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
1264 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1265 rte_flow_error_set(error, EINVAL,
1266 RTE_FLOW_ERROR_TYPE_ITEM,
1267 item, "Not supported by L2 tunnel filter");
1271 filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
1273 * grp and e_cid_base are bit fields and only use 14 bits.
1274 * e-tag id is taken as little endian by HW.
1276 filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);
1278 /* check if the next not void item is END */
1279 item = next_no_void_pattern(pattern, item);
1280 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1281 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1282 rte_flow_error_set(error, EINVAL,
1283 RTE_FLOW_ERROR_TYPE_ITEM,
1284 item, "Not supported by L2 tunnel filter");
1289 /* must be input direction */
1290 if (!attr->ingress) {
1291 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1292 rte_flow_error_set(error, EINVAL,
1293 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1294 attr, "Only support ingress.");
1300 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1301 rte_flow_error_set(error, EINVAL,
1302 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1303 attr, "Not support egress.");
1308 if (attr->transfer) {
1309 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1310 rte_flow_error_set(error, EINVAL,
1311 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1312 attr, "No support for transfer.");
1317 if (attr->priority) {
1318 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1319 rte_flow_error_set(error, EINVAL,
1320 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1321 attr, "Not support priority.");
1325 /* check if the first not void action is VF or PF. */
1326 act = next_no_void_action(actions, NULL);
1327 if (act->type != RTE_FLOW_ACTION_TYPE_VF &&
1328 act->type != RTE_FLOW_ACTION_TYPE_PF) {
1329 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1330 rte_flow_error_set(error, EINVAL,
1331 RTE_FLOW_ERROR_TYPE_ACTION,
1332 act, "Not supported action.");
1336 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1337 act_vf = (const struct rte_flow_action_vf *)act->conf;
1338 filter->pool = act_vf->id;
1340 filter->pool = pci_dev->max_vfs;
1343 /* check if the next not void item is END */
1344 act = next_no_void_action(actions, act);
1345 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1346 memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1347 rte_flow_error_set(error, EINVAL,
1348 RTE_FLOW_ERROR_TYPE_ACTION,
1349 act, "Not supported action.");
1357 ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
1358 const struct rte_flow_attr *attr,
1359 const struct rte_flow_item pattern[],
1360 const struct rte_flow_action actions[],
1361 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
1362 struct rte_flow_error *error)
1365 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1366 struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
1369 ret = cons_parse_l2_tn_filter(dev, attr, pattern,
1370 actions, l2_tn_filter, error);
1372 if (hw->mac.type != ixgbe_mac_X550 &&
1373 hw->mac.type != ixgbe_mac_X550EM_x &&
1374 hw->mac.type != ixgbe_mac_X550EM_a) {
1375 memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
1376 rte_flow_error_set(error, EINVAL,
1377 RTE_FLOW_ERROR_TYPE_ITEM,
1378 NULL, "Not supported by L2 tunnel filter");
1382 vf_num = pci_dev->max_vfs;
1384 if (l2_tn_filter->pool > vf_num)
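/*
 * Illustrative sketch (not part of the driver): an E-tag rule of the shape
 * documented above, redirecting matching traffic to VF 1.  The helper name,
 * port id and VF id are placeholders, and packing GRP 0x1 / E-CID base 0x309
 * into rsvd_grp_ecid_b as 0x1309 is an assumption based on the field layout
 * noted in cons_parse_l2_tn_filter() above.
 */
static int __rte_unused
ixgbe_flow_doc_l2_tunnel_example(uint16_t port_id, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item_e_tag e_tag_spec = {
		.rsvd_grp_ecid_b = RTE_BE16(0x1309),	/* GRP 0x1, E-CID base 0x309 */
	};
	const struct rte_flow_item_e_tag e_tag_mask = {
		.rsvd_grp_ecid_b = RTE_BE16(0x3FFF),	/* only GRP + E-CID base matter */
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_E_TAG,
		  .spec = &e_tag_spec, .mask = &e_tag_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_vf vf = { .id = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_VF, .conf = &vf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_validate(port_id, &attr, pattern, actions, error);
}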
1390 /* Parse to get the attr and action info of a flow director rule. */
1392 ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
1393 const struct rte_flow_action actions[],
1394 struct ixgbe_fdir_rule *rule,
1395 struct rte_flow_error *error)
1397 const struct rte_flow_action *act;
1398 const struct rte_flow_action_queue *act_q;
1399 const struct rte_flow_action_mark *mark;
1402 /* must be input direction */
1403 if (!attr->ingress) {
1404 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1405 rte_flow_error_set(error, EINVAL,
1406 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1407 attr, "Only support ingress.");
1413 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1414 rte_flow_error_set(error, EINVAL,
1415 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1416 attr, "Not support egress.");
1421 if (attr->transfer) {
1422 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1423 rte_flow_error_set(error, EINVAL,
1424 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1425 attr, "No support for transfer.");
1430 if (attr->priority) {
1431 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1432 rte_flow_error_set(error, EINVAL,
1433 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1434 attr, "Not support priority.");
1438 /* check if the first not void action is QUEUE or DROP. */
1439 act = next_no_void_action(actions, NULL);
1440 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
1441 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
1442 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1443 rte_flow_error_set(error, EINVAL,
1444 RTE_FLOW_ERROR_TYPE_ACTION,
1445 act, "Not supported action.");
1449 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1450 act_q = (const struct rte_flow_action_queue *)act->conf;
1451 rule->queue = act_q->index;
1453 /* signature mode does not support drop action. */
1454 if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1455 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1456 rte_flow_error_set(error, EINVAL,
1457 RTE_FLOW_ERROR_TYPE_ACTION,
1458 act, "Not supported action.");
1461 rule->fdirflags = IXGBE_FDIRCMD_DROP;
1464 /* check if the next not void item is MARK */
1465 act = next_no_void_action(actions, act);
1466 if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
1467 (act->type != RTE_FLOW_ACTION_TYPE_END)) {
1468 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1469 rte_flow_error_set(error, EINVAL,
1470 RTE_FLOW_ERROR_TYPE_ACTION,
1471 act, "Not supported action.");
1477 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
1478 mark = (const struct rte_flow_action_mark *)act->conf;
1479 rule->soft_id = mark->id;
1480 act = next_no_void_action(actions, act);
1483 /* check if the next not void item is END */
1484 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1485 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1486 rte_flow_error_set(error, EINVAL,
1487 RTE_FLOW_ERROR_TYPE_ACTION,
1488 act, "Not supported action.");
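/*
 * Illustrative sketch (not part of the driver): an action list of the shape
 * accepted by ixgbe_parse_fdir_act_attr() above -- QUEUE (or DROP),
 * optionally followed by MARK, then END.  The helper name, queue index and
 * mark id are placeholders.
 */
static int __rte_unused
ixgbe_flow_doc_fdir_actions_example(const struct rte_flow_attr *attr,
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	const struct rte_flow_action_queue queue = { .index = 2 };
	const struct rte_flow_action_mark mark = { .id = 0x1234 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}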
1495 /* search the next non-void pattern item and skip fuzzy items */
1497 const struct rte_flow_item *next_no_fuzzy_pattern(
1498 const struct rte_flow_item pattern[],
1499 const struct rte_flow_item *cur)
1501 const struct rte_flow_item *next =
1502 next_no_void_pattern(pattern, cur);
1504 if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
1506 next = next_no_void_pattern(pattern, next);
1510 static inline uint8_t signature_match(const struct rte_flow_item pattern[])
1512 const struct rte_flow_item_fuzzy *spec, *last, *mask;
1513 const struct rte_flow_item *item;
1514 uint32_t sh, lh, mh;
1519 if (item->type == RTE_FLOW_ITEM_TYPE_END)
1522 if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
1554 * Parse the rule to see if it is an IP or MAC VLAN flow director rule,
1555 * and get the flow director filter info along the way.
1556 * UDP/TCP/SCTP PATTERN:
1557 * The first not void item can be ETH or IPV4 or IPV6
1558 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
1559 * The next not void item could be UDP or TCP or SCTP (optional)
1560 * The next not void item could be RAW (for flexbyte, optional)
1561 * The next not void item must be END.
1562 * A Fuzzy Match pattern can appear at any place before END.
1563 * Fuzzy Match is optional for IPV4 but is required for IPV6
1565 * The first not void item must be ETH.
1566 * The second not void item must be MAC VLAN.
1567 * The next not void item must be END.
1569 * The first not void action should be QUEUE or DROP.
1570 * The second not void optional action should be MARK,
1571 * mark_id is a uint32_t number.
1572 * The next not void action should be END.
1573 * UDP/TCP/SCTP pattern example:
1576 * IPV4 src_addr 192.168.1.20 0xFFFFFFFF
1577 * dst_addr 192.167.3.50 0xFFFFFFFF
1578 * UDP/TCP/SCTP src_port 80 0xFFFF
1579 * dst_port 80 0xFFFF
1580 * FLEX relative 0 0x1
1583 * offset 12 0xFFFFFFFF
1586 * pattern[0] 0x86 0xFF
1587 * pattern[1] 0xDD 0xFF
1589 * MAC VLAN pattern example:
1592 *		{0xAC, 0x7B, 0xA1,	{0xFF, 0xFF, 0xFF,
1593 *		0x2C, 0x6D, 0x36}	0xFF, 0xFF, 0xFF}
1594 * MAC VLAN tci 0x2016 0xEFFF
1596 * Other members in mask and spec should be set to 0x00.
1597 * Item->last should be NULL.
1600 ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
1601 const struct rte_flow_attr *attr,
1602 const struct rte_flow_item pattern[],
1603 const struct rte_flow_action actions[],
1604 struct ixgbe_fdir_rule *rule,
1605 struct rte_flow_error *error)
1607 const struct rte_flow_item *item;
1608 const struct rte_flow_item_eth *eth_spec;
1609 const struct rte_flow_item_eth *eth_mask;
1610 const struct rte_flow_item_ipv4 *ipv4_spec;
1611 const struct rte_flow_item_ipv4 *ipv4_mask;
1612 const struct rte_flow_item_ipv6 *ipv6_spec;
1613 const struct rte_flow_item_ipv6 *ipv6_mask;
1614 const struct rte_flow_item_tcp *tcp_spec;
1615 const struct rte_flow_item_tcp *tcp_mask;
1616 const struct rte_flow_item_udp *udp_spec;
1617 const struct rte_flow_item_udp *udp_mask;
1618 const struct rte_flow_item_sctp *sctp_spec;
1619 const struct rte_flow_item_sctp *sctp_mask;
1620 const struct rte_flow_item_vlan *vlan_spec;
1621 const struct rte_flow_item_vlan *vlan_mask;
1622 const struct rte_flow_item_raw *raw_mask;
1623 const struct rte_flow_item_raw *raw_spec;
1626 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1629 rte_flow_error_set(error, EINVAL,
1630 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1631 NULL, "NULL pattern.");
1636 rte_flow_error_set(error, EINVAL,
1637 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1638 NULL, "NULL action.");
1643 rte_flow_error_set(error, EINVAL,
1644 RTE_FLOW_ERROR_TYPE_ATTR,
1645 NULL, "NULL attribute.");
1650 * Some fields may not be provided. Set spec to 0 and mask to the default
1651 * value, so we need not do anything later for the fields that are not provided.
1653 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1654 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
1655 rule->mask.vlan_tci_mask = 0;
1656 rule->mask.flex_bytes_mask = 0;
1659 * The first not void item should be
1660 * MAC or IPv4 or IPv6 or TCP or UDP or SCTP.
1662 item = next_no_fuzzy_pattern(pattern, NULL);
1663 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
1664 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1665 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
1666 item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1667 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1668 item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
1669 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1670 rte_flow_error_set(error, EINVAL,
1671 RTE_FLOW_ERROR_TYPE_ITEM,
1672 item, "Not supported by fdir filter");
1676 if (signature_match(pattern))
1677 rule->mode = RTE_FDIR_MODE_SIGNATURE;
1679 rule->mode = RTE_FDIR_MODE_PERFECT;
1681 /*Not supported last point for range*/
1683 rte_flow_error_set(error, EINVAL,
1684 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1685 item, "Not supported last point for range");
1689 /* Get the MAC info. */
1690 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
1692 * Only support vlan and dst MAC address,
1693 * others should be masked.
1695 if (item->spec && !item->mask) {
1696 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1697 rte_flow_error_set(error, EINVAL,
1698 RTE_FLOW_ERROR_TYPE_ITEM,
1699 item, "Not supported by fdir filter");
1704 rule->b_spec = TRUE;
1705 eth_spec = item->spec;
1707 /* Get the dst MAC. */
1708 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1709 rule->ixgbe_fdir.formatted.inner_mac[j] =
1710 eth_spec->dst.addr_bytes[j];
1717 rule->b_mask = TRUE;
1718 eth_mask = item->mask;
1720 /* Ether type should be masked. */
1721 if (eth_mask->type ||
1722 rule->mode == RTE_FDIR_MODE_SIGNATURE) {
1723 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1724 rte_flow_error_set(error, EINVAL,
1725 RTE_FLOW_ERROR_TYPE_ITEM,
1726 item, "Not supported by fdir filter");
1730 /* If the Ethernet item carries content, it means MAC VLAN mode. */
1731 rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;
1734 * The src MAC address must be masked out,
1735 * and masking the dst MAC address is not supported.
1737 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
1738 if (eth_mask->src.addr_bytes[j] ||
1739 eth_mask->dst.addr_bytes[j] != 0xFF) {
1741 sizeof(struct ixgbe_fdir_rule));
1742 rte_flow_error_set(error, EINVAL,
1743 RTE_FLOW_ERROR_TYPE_ITEM,
1744 item, "Not supported by fdir filter");
1749 /* When there is no VLAN, it is treated as a full mask. */
1750 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
1752 /*** If both spec and mask are NULL,
1753 * it means we don't care about ETH.
1758 * Check if the next not void item is vlan or ipv4.
1759 * IPv6 is not supported.
1761 item = next_no_fuzzy_pattern(pattern, item);
1762 if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
1763 if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1764 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1765 rte_flow_error_set(error, EINVAL,
1766 RTE_FLOW_ERROR_TYPE_ITEM,
1767 item, "Not supported by fdir filter");
1771 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
1772 item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
1773 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1774 rte_flow_error_set(error, EINVAL,
1775 RTE_FLOW_ERROR_TYPE_ITEM,
1776 item, "Not supported by fdir filter");
1782 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
1783 if (!(item->spec && item->mask)) {
1784 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1785 rte_flow_error_set(error, EINVAL,
1786 RTE_FLOW_ERROR_TYPE_ITEM,
1787 item, "Not supported by fdir filter");
1791 /*Not supported last point for range*/
1793 rte_flow_error_set(error, EINVAL,
1794 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1795 item, "Not supported last point for range");
1799 vlan_spec = item->spec;
1800 vlan_mask = item->mask;
1802 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
1804 rule->mask.vlan_tci_mask = vlan_mask->tci;
1805 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
1806 /* More than one tag is not supported. */
1808 /* Next not void item must be END */
1809 item = next_no_fuzzy_pattern(pattern, item);
1810 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
1811 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1812 rte_flow_error_set(error, EINVAL,
1813 RTE_FLOW_ERROR_TYPE_ITEM,
1814 item, "Not supported by fdir filter");
1819 /* Get the IPV4 info. */
1820 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
1822 * Set the flow type even if there's no content
1823 * as we must have a flow type.
1825 rule->ixgbe_fdir.formatted.flow_type =
1826 IXGBE_ATR_FLOW_TYPE_IPV4;
1827 /*Not supported last point for range*/
1829 rte_flow_error_set(error, EINVAL,
1830 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1831 item, "Not supported last point for range");
1835 * Only care about src & dst addresses,
1836 * others should be masked.
1839 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1840 rte_flow_error_set(error, EINVAL,
1841 RTE_FLOW_ERROR_TYPE_ITEM,
1842 item, "Not supported by fdir filter");
1845 rule->b_mask = TRUE;
1846 ipv4_mask = item->mask;
1847 if (ipv4_mask->hdr.version_ihl ||
1848 ipv4_mask->hdr.type_of_service ||
1849 ipv4_mask->hdr.total_length ||
1850 ipv4_mask->hdr.packet_id ||
1851 ipv4_mask->hdr.fragment_offset ||
1852 ipv4_mask->hdr.time_to_live ||
1853 ipv4_mask->hdr.next_proto_id ||
1854 ipv4_mask->hdr.hdr_checksum) {
1855 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1856 rte_flow_error_set(error, EINVAL,
1857 RTE_FLOW_ERROR_TYPE_ITEM,
1858 item, "Not supported by fdir filter");
1861 rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
1862 rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;
1865 rule->b_spec = TRUE;
1866 ipv4_spec = item->spec;
1867 rule->ixgbe_fdir.formatted.dst_ip[0] =
1868 ipv4_spec->hdr.dst_addr;
1869 rule->ixgbe_fdir.formatted.src_ip[0] =
1870 ipv4_spec->hdr.src_addr;
1874 * Check if the next not void item is
1875 * TCP or UDP or SCTP or END.
1877 item = next_no_fuzzy_pattern(pattern, item);
1878 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1879 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1880 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1881 item->type != RTE_FLOW_ITEM_TYPE_END &&
1882 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1883 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1884 rte_flow_error_set(error, EINVAL,
1885 RTE_FLOW_ERROR_TYPE_ITEM,
1886 item, "Not supported by fdir filter");
1891 /* Get the IPV6 info. */
1892 if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
1894 * Set the flow type even if there's no content
1895 * as we must have a flow type.
1897 rule->ixgbe_fdir.formatted.flow_type =
1898 IXGBE_ATR_FLOW_TYPE_IPV6;
1901 * 1. must be a signature match
1902 * 2. last is not supported
1903 * 3. mask must not be null
1905 if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
1908 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1909 rte_flow_error_set(error, EINVAL,
1910 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1911 item, "Not supported last point for range");
1915 rule->b_mask = TRUE;
1916 ipv6_mask = item->mask;
1917 if (ipv6_mask->hdr.vtc_flow ||
1918 ipv6_mask->hdr.payload_len ||
1919 ipv6_mask->hdr.proto ||
1920 ipv6_mask->hdr.hop_limits) {
1921 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1922 rte_flow_error_set(error, EINVAL,
1923 RTE_FLOW_ERROR_TYPE_ITEM,
1924 item, "Not supported by fdir filter");
1928 /* check src addr mask */
1929 for (j = 0; j < 16; j++) {
1930 if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
1931 rule->mask.src_ipv6_mask |= 1 << j;
1932 } else if (ipv6_mask->hdr.src_addr[j] != 0) {
1933 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1934 rte_flow_error_set(error, EINVAL,
1935 RTE_FLOW_ERROR_TYPE_ITEM,
1936 item, "Not supported by fdir filter");
1941 /* check dst addr mask */
1942 for (j = 0; j < 16; j++) {
1943 if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
1944 rule->mask.dst_ipv6_mask |= 1 << j;
1945 } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
1946 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1947 rte_flow_error_set(error, EINVAL,
1948 RTE_FLOW_ERROR_TYPE_ITEM,
1949 item, "Not supported by fdir filter");
1955 rule->b_spec = TRUE;
1956 ipv6_spec = item->spec;
1957 rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
1958 ipv6_spec->hdr.src_addr, 16);
1959 rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
1960 ipv6_spec->hdr.dst_addr, 16);
1964 * Check if the next not void item is
1965 * TCP or UDP or SCTP or END.
1967 item = next_no_fuzzy_pattern(pattern, item);
1968 if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
1969 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
1970 item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
1971 item->type != RTE_FLOW_ITEM_TYPE_END &&
1972 item->type != RTE_FLOW_ITEM_TYPE_RAW) {
1973 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
1974 rte_flow_error_set(error, EINVAL,
1975 RTE_FLOW_ERROR_TYPE_ITEM,
1976 item, "Not supported by fdir filter");
1981 /* Get the TCP info. */
1982 if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
1984 * Set the flow type even if there's no content
1985 * as we must have a flow type.
1987 rule->ixgbe_fdir.formatted.flow_type |=
1988 IXGBE_ATR_L4TYPE_TCP;
1989 /*Not supported last point for range*/
1991 rte_flow_error_set(error, EINVAL,
1992 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1993 item, "Not supported last point for range");
1997 * Only care about src & dst ports,
1998 * others should be masked.
2001 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2002 rte_flow_error_set(error, EINVAL,
2003 RTE_FLOW_ERROR_TYPE_ITEM,
2004 item, "Not supported by fdir filter");
2007 rule->b_mask = TRUE;
2008 tcp_mask = item->mask;
2009 if (tcp_mask->hdr.sent_seq ||
2010 tcp_mask->hdr.recv_ack ||
2011 tcp_mask->hdr.data_off ||
2012 tcp_mask->hdr.tcp_flags ||
2013 tcp_mask->hdr.rx_win ||
2014 tcp_mask->hdr.cksum ||
2015 tcp_mask->hdr.tcp_urp) {
2016 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2017 rte_flow_error_set(error, EINVAL,
2018 RTE_FLOW_ERROR_TYPE_ITEM,
2019 item, "Not supported by fdir filter");
2022 rule->mask.src_port_mask = tcp_mask->hdr.src_port;
2023 rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;
2026 rule->b_spec = TRUE;
2027 tcp_spec = item->spec;
2028 rule->ixgbe_fdir.formatted.src_port =
2029 tcp_spec->hdr.src_port;
2030 rule->ixgbe_fdir.formatted.dst_port =
2031 tcp_spec->hdr.dst_port;
2034 item = next_no_fuzzy_pattern(pattern, item);
2035 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2036 item->type != RTE_FLOW_ITEM_TYPE_END) {
2037 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2038 rte_flow_error_set(error, EINVAL,
2039 RTE_FLOW_ERROR_TYPE_ITEM,
2040 item, "Not supported by fdir filter");
2046 /* Get the UDP info */
2047 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2049 * Set the flow type even if there's no content
2050 * as we must have a flow type.
2052 rule->ixgbe_fdir.formatted.flow_type |=
2053 IXGBE_ATR_L4TYPE_UDP;
2054 /*Not supported last point for range*/
2056 rte_flow_error_set(error, EINVAL,
2057 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2058 item, "Not supported last point for range");
2062 * Only care about src & dst ports,
2063 * others should be masked.
2066 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2067 rte_flow_error_set(error, EINVAL,
2068 RTE_FLOW_ERROR_TYPE_ITEM,
2069 item, "Not supported by fdir filter");
2072 rule->b_mask = TRUE;
2073 udp_mask = item->mask;
2074 if (udp_mask->hdr.dgram_len ||
2075 udp_mask->hdr.dgram_cksum) {
2076 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2077 rte_flow_error_set(error, EINVAL,
2078 RTE_FLOW_ERROR_TYPE_ITEM,
2079 item, "Not supported by fdir filter");
2082 rule->mask.src_port_mask = udp_mask->hdr.src_port;
2083 rule->mask.dst_port_mask = udp_mask->hdr.dst_port;
2086 rule->b_spec = TRUE;
2087 udp_spec = item->spec;
2088 rule->ixgbe_fdir.formatted.src_port =
2089 udp_spec->hdr.src_port;
2090 rule->ixgbe_fdir.formatted.dst_port =
2091 udp_spec->hdr.dst_port;
2094 item = next_no_fuzzy_pattern(pattern, item);
2095 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2096 item->type != RTE_FLOW_ITEM_TYPE_END) {
2097 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2098 rte_flow_error_set(error, EINVAL,
2099 RTE_FLOW_ERROR_TYPE_ITEM,
2100 item, "Not supported by fdir filter");
2106 /* Get the SCTP info */
2107 if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
2109 * Set the flow type even if there's no content
2110 * as we must have a flow type.
2112 rule->ixgbe_fdir.formatted.flow_type |=
2113 IXGBE_ATR_L4TYPE_SCTP;
2114 /*Not supported last point for range*/
2116 rte_flow_error_set(error, EINVAL,
2117 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2118 item, "Not supported last point for range");
2122 /* only the x550 family supports the SCTP port */
2123 if (hw->mac.type == ixgbe_mac_X550 ||
2124 hw->mac.type == ixgbe_mac_X550EM_x ||
2125 hw->mac.type == ixgbe_mac_X550EM_a) {
2127 * Only care about src & dst ports,
2128 * others should be masked.
2131 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2132 rte_flow_error_set(error, EINVAL,
2133 RTE_FLOW_ERROR_TYPE_ITEM,
2134 item, "Not supported by fdir filter");
2137 rule->b_mask = TRUE;
2138 sctp_mask = item->mask;
2139 if (sctp_mask->hdr.tag ||
2140 sctp_mask->hdr.cksum) {
2141 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2142 rte_flow_error_set(error, EINVAL,
2143 RTE_FLOW_ERROR_TYPE_ITEM,
2144 item, "Not supported by fdir filter");
2147 rule->mask.src_port_mask = sctp_mask->hdr.src_port;
2148 rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;
2151 rule->b_spec = TRUE;
2152 sctp_spec = item->spec;
2153 rule->ixgbe_fdir.formatted.src_port =
2154 sctp_spec->hdr.src_port;
2155 rule->ixgbe_fdir.formatted.dst_port =
2156 sctp_spec->hdr.dst_port;
2158 /* on other NICs, even the SCTP port is not supported */
2160 sctp_mask = item->mask;
2162 (sctp_mask->hdr.src_port ||
2163 sctp_mask->hdr.dst_port ||
2164 sctp_mask->hdr.tag ||
2165 sctp_mask->hdr.cksum)) {
2166 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2167 rte_flow_error_set(error, EINVAL,
2168 RTE_FLOW_ERROR_TYPE_ITEM,
2169 item, "Not supported by fdir filter");
2174 item = next_no_fuzzy_pattern(pattern, item);
2175 if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
2176 item->type != RTE_FLOW_ITEM_TYPE_END) {
2177 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2178 rte_flow_error_set(error, EINVAL,
2179 RTE_FLOW_ERROR_TYPE_ITEM,
2180 item, "Not supported by fdir filter");
2185 /* Get the flex byte info */
2186 if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
2187 /* Not supported last point for range */
2189 rte_flow_error_set(error, EINVAL,
2190 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2191 item, "Not supported last point for range");
2194 /* Spec and mask should not be null. */
2195 if (!item->mask || !item->spec) {
2196 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2197 rte_flow_error_set(error, EINVAL,
2198 RTE_FLOW_ERROR_TYPE_ITEM,
2199 item, "Not supported by fdir filter");
2203 raw_mask = item->mask;
2206 if (raw_mask->relative != 0x1 ||
2207 raw_mask->search != 0x1 ||
2208 raw_mask->reserved != 0x0 ||
2209 (uint32_t)raw_mask->offset != 0xffffffff ||
2210 raw_mask->limit != 0xffff ||
2211 raw_mask->length != 0xffff) {
2212 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2213 rte_flow_error_set(error, EINVAL,
2214 RTE_FLOW_ERROR_TYPE_ITEM,
2215 item, "Not supported by fdir filter");
2219 raw_spec = item->spec;
2222 if (raw_spec->relative != 0 ||
2223 raw_spec->search != 0 ||
2224 raw_spec->reserved != 0 ||
2225 raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
2226 raw_spec->offset % 2 ||
2227 raw_spec->limit != 0 ||
2228 raw_spec->length != 2 ||
2229 /* pattern can't be 0xffff */
2230 (raw_spec->pattern[0] == 0xff &&
2231 raw_spec->pattern[1] == 0xff)) {
2232 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2233 rte_flow_error_set(error, EINVAL,
2234 RTE_FLOW_ERROR_TYPE_ITEM,
2235 item, "Not supported by fdir filter");
2239 /* check pattern mask */
2240 if (raw_mask->pattern[0] != 0xff ||
2241 raw_mask->pattern[1] != 0xff) {
2242 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2243 rte_flow_error_set(error, EINVAL,
2244 RTE_FLOW_ERROR_TYPE_ITEM,
2245 item, "Not supported by fdir filter");
2249 rule->mask.flex_bytes_mask = 0xffff;
2250 rule->ixgbe_fdir.formatted.flex_bytes =
2251 (((uint16_t)raw_spec->pattern[1]) << 8) |
2252 raw_spec->pattern[0];
2253 rule->flex_bytes_offset = raw_spec->offset;
2256 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2257 /* check if the next not void item is END */
2258 item = next_no_fuzzy_pattern(pattern, item);
2259 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2260 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2261 rte_flow_error_set(error, EINVAL,
2262 RTE_FLOW_ERROR_TYPE_ITEM,
2263 item, "Not supported by fdir filter");
2268 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
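/*
 * Illustrative sketch only (not part of the driver): one way an application
 * could build a RAW item that satisfies the flex-byte checks above. The
 * helper name, the pattern bytes, and the 12-byte offset are arbitrary
 * example values; only the constraints themselves (an even offset no larger
 * than IXGBE_MAX_FLX_SOURCE_OFF, a length of exactly 2, and a fully-masked
 * two-byte pattern) come from the parser above.
 */
static const uint8_t example_flex_pattern[2] = { 0x12, 0x34 };
static const uint8_t example_flex_pattern_mask[2] = { 0xFF, 0xFF };

static __rte_unused void
example_build_flex_byte_item(struct rte_flow_item *item,
			     struct rte_flow_item_raw *spec,
			     struct rte_flow_item_raw *mask)
{
	memset(spec, 0, sizeof(*spec));
	spec->offset = 12;		/* even and <= IXGBE_MAX_FLX_SOURCE_OFF */
	spec->length = 2;		/* exactly two flex bytes */
	spec->pattern = example_flex_pattern;	/* the two bytes must not both be 0xff */

	memset(mask, 0, sizeof(*mask));
	mask->relative = 1;
	mask->search = 1;
	mask->offset = -1;		/* offset must be fully masked */
	mask->limit = 0xffff;
	mask->length = 0xffff;
	mask->pattern = example_flex_pattern_mask;

	item->type = RTE_FLOW_ITEM_TYPE_RAW;
	item->spec = spec;
	item->last = NULL;		/* ranges are not supported */
	item->mask = mask;
}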
2271 #define NVGRE_PROTOCOL 0x6558
2274 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
2275 * And get the flow director filter info as well.
2277 * For VxLAN: the first not void item must be ETH.
2278 * The second not void item must be IPV4/IPV6.
2279 * The third not void item must be UDP, and the fourth must be VxLAN.
2280 * The next not void item must be END.
2282 * For NVGRE: the first not void item must be ETH.
2283 * The second not void item must be IPV4/IPV6.
2284 * The third not void item must be NVGRE.
2285 * The next not void item must be END.
2287 * The first not void action should be QUEUE or DROP.
2288 * The second not void optional action should be MARK,
2289 * mark_id is a uint32_t number.
2290 * The next not void action should be END.
2291 * VxLAN pattern example:
2294 * IPV4/IPV6 NULL NULL
2296 * VxLAN vni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2297 * MAC VLAN tci 0x2016 0xEFFF
2299 * NVGRE pattern example:
2302 * IPV4/IPV6 NULL NULL
2303 * NVGRE protocol 0x6558 0xFFFF
2304 * tni{0x00, 0x32, 0x54} {0xFF, 0xFF, 0xFF}
2305 * MAC VLAN tci 0x2016 0xEFFF
2307 * Other members in mask and spec should be set to 0x00.
2308 * item->last should be NULL.
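 *
 * A minimal application-side sketch of a VxLAN rule matching the pattern
 * above (illustrative only; the VNI, MAC address, VLAN TCI, and queue index
 * are example values, and the variable names are hypothetical):
 *	struct rte_flow_item_vxlan vxlan_spec = { .vni = { 0x00, 0x32, 0x54 } };
 *	struct rte_flow_item_vxlan vxlan_mask = { .vni = { 0xFF, 0xFF, 0xFF } };
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_vlan vlan_spec = { .tci = RTE_BE16(0x2016) };
 *	struct rte_flow_item_vlan vlan_mask = { .tci = RTE_BE16(0xEFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &vlan_spec, .mask = &vlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};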
2311 ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
2312 const struct rte_flow_item pattern[],
2313 const struct rte_flow_action actions[],
2314 struct ixgbe_fdir_rule *rule,
2315 struct rte_flow_error *error)
2317 const struct rte_flow_item *item;
2318 const struct rte_flow_item_vxlan *vxlan_spec;
2319 const struct rte_flow_item_vxlan *vxlan_mask;
2320 const struct rte_flow_item_nvgre *nvgre_spec;
2321 const struct rte_flow_item_nvgre *nvgre_mask;
2322 const struct rte_flow_item_eth *eth_spec;
2323 const struct rte_flow_item_eth *eth_mask;
2324 const struct rte_flow_item_vlan *vlan_spec;
2325 const struct rte_flow_item_vlan *vlan_mask;
2329 rte_flow_error_set(error, EINVAL,
2330 RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2331 NULL, "NULL pattern.");
2336 rte_flow_error_set(error, EINVAL,
2337 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2338 NULL, "NULL action.");
2343 rte_flow_error_set(error, EINVAL,
2344 RTE_FLOW_ERROR_TYPE_ATTR,
2345 NULL, "NULL attribute.");
2350 * Some fields may not be provided. Set spec to 0 and mask to default
2351 * value, so we need not do anything later for the fields that are not provided.
2353 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2354 memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
2355 rule->mask.vlan_tci_mask = 0;
2358 * The first not void item should be
2359 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
2361 item = next_no_void_pattern(pattern, NULL);
2362 if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
2363 item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2364 item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
2365 item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2366 item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
2367 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2368 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2369 rte_flow_error_set(error, EINVAL,
2370 RTE_FLOW_ERROR_TYPE_ITEM,
2371 item, "Not supported by fdir filter");
2375 rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
2378 if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
2379 /* Only used to describe the protocol stack. */
2380 if (item->spec || item->mask) {
2381 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2382 rte_flow_error_set(error, EINVAL,
2383 RTE_FLOW_ERROR_TYPE_ITEM,
2384 item, "Not supported by fdir filter");
2387 /* Not supported last point for range */
2389 rte_flow_error_set(error, EINVAL,
2390 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2391 item, "Not supported last point for range");
2395 /* Check if the next not void item is IPv4 or IPv6. */
2396 item = next_no_void_pattern(pattern, item);
2397 if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
2398 item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
2399 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2400 rte_flow_error_set(error, EINVAL,
2401 RTE_FLOW_ERROR_TYPE_ITEM,
2402 item, "Not supported by fdir filter");
2408 if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
2409 item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
2410 /* Only used to describe the protocol stack. */
2411 if (item->spec || item->mask) {
2412 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2413 rte_flow_error_set(error, EINVAL,
2414 RTE_FLOW_ERROR_TYPE_ITEM,
2415 item, "Not supported by fdir filter");
2418 /* Not supported last point for range */
2420 rte_flow_error_set(error, EINVAL,
2421 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2422 item, "Not supported last point for range");
2426 /* Check if the next not void item is UDP or NVGRE. */
2427 item = next_no_void_pattern(pattern, item);
2428 if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
2429 item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
2430 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2431 rte_flow_error_set(error, EINVAL,
2432 RTE_FLOW_ERROR_TYPE_ITEM,
2433 item, "Not supported by fdir filter");
2439 if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
2440 /* Only used to describe the protocol stack. */
2441 if (item->spec || item->mask) {
2442 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2443 rte_flow_error_set(error, EINVAL,
2444 RTE_FLOW_ERROR_TYPE_ITEM,
2445 item, "Not supported by fdir filter");
2448 /* Not supported last point for range */
2450 rte_flow_error_set(error, EINVAL,
2451 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2452 item, "Not supported last point for range");
2456 /* Check if the next not void item is VxLAN. */
2457 item = next_no_void_pattern(pattern, item);
2458 if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
2459 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2460 rte_flow_error_set(error, EINVAL,
2461 RTE_FLOW_ERROR_TYPE_ITEM,
2462 item, "Not supported by fdir filter");
2467 /* Get the VxLAN info */
2468 if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
2469 rule->ixgbe_fdir.formatted.tunnel_type =
2470 IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
2472 /* Only care about VNI, others should be masked. */
2474 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2475 rte_flow_error_set(error, EINVAL,
2476 RTE_FLOW_ERROR_TYPE_ITEM,
2477 item, "Not supported by fdir filter");
2480 /* Not supported last point for range */
2482 rte_flow_error_set(error, EINVAL,
2483 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2484 item, "Not supported last point for range");
2487 rule->b_mask = TRUE;
2489 /* Tunnel type is always meaningful. */
2490 rule->mask.tunnel_type_mask = 1;
2492 vxlan_mask = item->mask;
2493 if (vxlan_mask->flags) {
2494 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2495 rte_flow_error_set(error, EINVAL,
2496 RTE_FLOW_ERROR_TYPE_ITEM,
2497 item, "Not supported by fdir filter");
2500 /* VNI must be totally masked or not. */
2501 if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
2502 vxlan_mask->vni[2]) &&
2503 ((vxlan_mask->vni[0] != 0xFF) ||
2504 (vxlan_mask->vni[1] != 0xFF) ||
2505 (vxlan_mask->vni[2] != 0xFF))) {
2506 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2507 rte_flow_error_set(error, EINVAL,
2508 RTE_FLOW_ERROR_TYPE_ITEM,
2509 item, "Not supported by fdir filter");
2513 rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
2514 RTE_DIM(vxlan_mask->vni));
2517 rule->b_spec = TRUE;
2518 vxlan_spec = item->spec;
2519 rte_memcpy(((uint8_t *)
2520 &rule->ixgbe_fdir.formatted.tni_vni),
2521 vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
2525 /* Get the NVGRE info */
2526 if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
2527 rule->ixgbe_fdir.formatted.tunnel_type =
2528 IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
2531 * Only care about flags0, flags1, protocol and TNI,
2532 * others should be masked.
2535 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2536 rte_flow_error_set(error, EINVAL,
2537 RTE_FLOW_ERROR_TYPE_ITEM,
2538 item, "Not supported by fdir filter");
2541 /* Not supported last point for range */
2543 rte_flow_error_set(error, EINVAL,
2544 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2545 item, "Not supported last point for range");
2548 rule->b_mask = TRUE;
2550 /* Tunnel type is always meaningful. */
2551 rule->mask.tunnel_type_mask = 1;
2553 nvgre_mask = item->mask;
2554 if (nvgre_mask->flow_id) {
2555 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2556 rte_flow_error_set(error, EINVAL,
2557 RTE_FLOW_ERROR_TYPE_ITEM,
2558 item, "Not supported by fdir filter");
2561 if (nvgre_mask->protocol &&
2562 nvgre_mask->protocol != 0xFFFF) {
2563 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2564 rte_flow_error_set(error, EINVAL,
2565 RTE_FLOW_ERROR_TYPE_ITEM,
2566 item, "Not supported by fdir filter");
2569 if (nvgre_mask->c_k_s_rsvd0_ver &&
2570 nvgre_mask->c_k_s_rsvd0_ver !=
2571 rte_cpu_to_be_16(0xFFFF)) {
2572 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2573 rte_flow_error_set(error, EINVAL,
2574 RTE_FLOW_ERROR_TYPE_ITEM,
2575 item, "Not supported by fdir filter");
2578 /* TNI must be totally masked or not. */
2579 if (nvgre_mask->tni[0] &&
2580 ((nvgre_mask->tni[0] != 0xFF) ||
2581 (nvgre_mask->tni[1] != 0xFF) ||
2582 (nvgre_mask->tni[2] != 0xFF))) {
2583 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2584 rte_flow_error_set(error, EINVAL,
2585 RTE_FLOW_ERROR_TYPE_ITEM,
2586 item, "Not supported by fdir filter");
2589 /* TNI is a 24-bit field. */
2590 rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
2591 RTE_DIM(nvgre_mask->tni));
2592 rule->mask.tunnel_id_mask <<= 8;
2595 rule->b_spec = TRUE;
2596 nvgre_spec = item->spec;
2597 if (nvgre_spec->c_k_s_rsvd0_ver !=
2598 rte_cpu_to_be_16(0x2000) &&
2599 nvgre_mask->c_k_s_rsvd0_ver) {
2600 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2601 rte_flow_error_set(error, EINVAL,
2602 RTE_FLOW_ERROR_TYPE_ITEM,
2603 item, "Not supported by fdir filter");
2606 if (nvgre_mask->protocol &&
2607 nvgre_spec->protocol !=
2608 rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
2609 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2610 rte_flow_error_set(error, EINVAL,
2611 RTE_FLOW_ERROR_TYPE_ITEM,
2612 item, "Not supported by fdir filter");
2615 /* TNI is a 24-bit field. */
2616 rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
2617 nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
2621 /* check if the next not void item is MAC */
2622 item = next_no_void_pattern(pattern, item);
2623 if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
2624 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2625 rte_flow_error_set(error, EINVAL,
2626 RTE_FLOW_ERROR_TYPE_ITEM,
2627 item, "Not supported by fdir filter");
2632 * Only VLAN and dst MAC address are supported,
2633 * others should be masked.
2637 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2638 rte_flow_error_set(error, EINVAL,
2639 RTE_FLOW_ERROR_TYPE_ITEM,
2640 item, "Not supported by fdir filter");
2643 /* Not supported last point for range */
2645 rte_flow_error_set(error, EINVAL,
2646 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2647 item, "Not supported last point for range");
2650 rule->b_mask = TRUE;
2651 eth_mask = item->mask;
2653 /* Ether type should be masked. */
2654 if (eth_mask->type) {
2655 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2656 rte_flow_error_set(error, EINVAL,
2657 RTE_FLOW_ERROR_TYPE_ITEM,
2658 item, "Not supported by fdir filter");
2662 /* src MAC address should be masked. */
2663 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2664 if (eth_mask->src.addr_bytes[j]) {
2666 sizeof(struct ixgbe_fdir_rule));
2667 rte_flow_error_set(error, EINVAL,
2668 RTE_FLOW_ERROR_TYPE_ITEM,
2669 item, "Not supported by fdir filter");
2673 rule->mask.mac_addr_byte_mask = 0;
2674 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2675 /* It's a per-byte mask. */
2676 if (eth_mask->dst.addr_bytes[j] == 0xFF) {
2677 rule->mask.mac_addr_byte_mask |= 0x1 << j;
2678 } else if (eth_mask->dst.addr_bytes[j]) {
2679 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2680 rte_flow_error_set(error, EINVAL,
2681 RTE_FLOW_ERROR_TYPE_ITEM,
2682 item, "Not supported by fdir filter");
2687 /* When there is no VLAN, treat it as a full mask. */
2688 rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
2691 rule->b_spec = TRUE;
2692 eth_spec = item->spec;
2694 /* Get the dst MAC. */
2695 for (j = 0; j < RTE_ETHER_ADDR_LEN; j++) {
2696 rule->ixgbe_fdir.formatted.inner_mac[j] =
2697 eth_spec->dst.addr_bytes[j];
2702 * Check if the next not void item is VLAN or IPv4.
2703 * IPv6 is not supported.
2705 item = next_no_void_pattern(pattern, item);
2706 if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
2707 (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
2708 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2709 rte_flow_error_set(error, EINVAL,
2710 RTE_FLOW_ERROR_TYPE_ITEM,
2711 item, "Not supported by fdir filter");
2714 /* Not supported last point for range */
2716 rte_flow_error_set(error, EINVAL,
2717 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2718 item, "Not supported last point for range");
2722 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2723 if (!(item->spec && item->mask)) {
2724 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2725 rte_flow_error_set(error, EINVAL,
2726 RTE_FLOW_ERROR_TYPE_ITEM,
2727 item, "Not supported by fdir filter");
2731 vlan_spec = item->spec;
2732 vlan_mask = item->mask;
2734 rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;
2736 rule->mask.vlan_tci_mask = vlan_mask->tci;
2737 rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
2738 /* More than one tag is not supported. */
2740 /* check if the next not void item is END */
2741 item = next_no_void_pattern(pattern, item);
2743 if (item->type != RTE_FLOW_ITEM_TYPE_END) {
2744 memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
2745 rte_flow_error_set(error, EINVAL,
2746 RTE_FLOW_ERROR_TYPE_ITEM,
2747 item, "Not supported by fdir filter");
2753 * If the tag is 0, it means we don't care about the VLAN.
2757 return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
2761 ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
2762 const struct rte_flow_attr *attr,
2763 const struct rte_flow_item pattern[],
2764 const struct rte_flow_action actions[],
2765 struct ixgbe_fdir_rule *rule,
2766 struct rte_flow_error *error)
2769 struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2770 enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;
2772 if (hw->mac.type != ixgbe_mac_82599EB &&
2773 hw->mac.type != ixgbe_mac_X540 &&
2774 hw->mac.type != ixgbe_mac_X550 &&
2775 hw->mac.type != ixgbe_mac_X550EM_x &&
2776 hw->mac.type != ixgbe_mac_X550EM_a)
2779 ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
2780 actions, rule, error);
2785 ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
2786 actions, rule, error);
2793 if (hw->mac.type == ixgbe_mac_82599EB &&
2794 rule->fdirflags == IXGBE_FDIRCMD_DROP &&
2795 (rule->ixgbe_fdir.formatted.src_port != 0 ||
2796 rule->ixgbe_fdir.formatted.dst_port != 0))
2799 if (fdir_mode == RTE_FDIR_MODE_NONE ||
2800 fdir_mode != rule->mode)
2803 if (rule->queue >= dev->data->nb_rx_queues)
2810 ixgbe_parse_rss_filter(struct rte_eth_dev *dev,
2811 const struct rte_flow_attr *attr,
2812 const struct rte_flow_action actions[],
2813 struct ixgbe_rte_flow_rss_conf *rss_conf,
2814 struct rte_flow_error *error)
2816 const struct rte_flow_action *act;
2817 const struct rte_flow_action_rss *rss;
2821 * RSS only supports forwarding,
2822 * check if the first not void action is RSS.
2824 act = next_no_void_action(actions, NULL);
2825 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
2826 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2827 rte_flow_error_set(error, EINVAL,
2828 RTE_FLOW_ERROR_TYPE_ACTION,
2829 act, "Not supported action.");
2833 rss = (const struct rte_flow_action_rss *)act->conf;
2835 if (!rss || !rss->queue_num) {
2836 rte_flow_error_set(error, EINVAL,
2837 RTE_FLOW_ERROR_TYPE_ACTION,
2843 for (n = 0; n < rss->queue_num; n++) {
2844 if (rss->queue[n] >= dev->data->nb_rx_queues) {
2845 rte_flow_error_set(error, EINVAL,
2846 RTE_FLOW_ERROR_TYPE_ACTION,
2848 "queue id > max number of queues");
2853 if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2854 return rte_flow_error_set
2855 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2856 "non-default RSS hash functions are not supported");
2858 return rte_flow_error_set
2859 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2860 "a nonzero RSS encapsulation level is not supported");
2861 if (rss->key_len && rss->key_len != RTE_DIM(rss_conf->key))
2862 return rte_flow_error_set
2863 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2864 "RSS hash key must be exactly 40 bytes");
2865 if (rss->queue_num > RTE_DIM(rss_conf->queue))
2866 return rte_flow_error_set
2867 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
2868 "too many queues for RSS context");
2869 if (ixgbe_rss_conf_init(rss_conf, rss))
2870 return rte_flow_error_set
2871 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
2872 "RSS context initialization failure");
2874 /* check if the next not void action is END */
2875 act = next_no_void_action(actions, act);
2876 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2877 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2878 rte_flow_error_set(error, EINVAL,
2879 RTE_FLOW_ERROR_TYPE_ACTION,
2880 act, "Not supported action.");
2885 /* must be input direction */
2886 if (!attr->ingress) {
2887 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2888 rte_flow_error_set(error, EINVAL,
2889 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
2890 attr, "Only support ingress.");
2896 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2897 rte_flow_error_set(error, EINVAL,
2898 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
2899 attr, "Not support egress.");
2904 if (attr->transfer) {
2905 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2906 rte_flow_error_set(error, EINVAL,
2907 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2908 attr, "No support for transfer.");
2912 if (attr->priority > 0xFFFF) {
2913 memset(rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
2914 rte_flow_error_set(error, EINVAL,
2915 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2916 attr, "Error priority.");
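/*
 * Illustrative sketch only (generic rte_flow API, example queue indexes):
 * an RSS action that passes the checks in ixgbe_parse_rss_filter() above
 * uses the default hash function, no encapsulation level, either no key or
 * a 40-byte key, and no more queues than the rule can hold:
 *
 *	static const uint16_t rss_queues[] = { 0, 1, 2, 3 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 0,
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,
 *		.key = NULL,
 *		.queue_num = RTE_DIM(rss_queues),
 *		.queue = rss_queues,
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */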
2923 /* remove the rss filter */
2925 ixgbe_clear_rss_filter(struct rte_eth_dev *dev)
2927 struct ixgbe_filter_info *filter_info =
2928 IXGBE_DEV_PRIVATE_TO_FILTER_INFO(dev->data->dev_private);
2930 if (filter_info->rss_info.conf.queue_num)
2931 ixgbe_config_rss_filter(dev, &filter_info->rss_info, FALSE);
2935 ixgbe_filterlist_init(void)
2937 TAILQ_INIT(&filter_ntuple_list);
2938 TAILQ_INIT(&filter_ethertype_list);
2939 TAILQ_INIT(&filter_syn_list);
2940 TAILQ_INIT(&filter_fdir_list);
2941 TAILQ_INIT(&filter_l2_tunnel_list);
2942 TAILQ_INIT(&filter_rss_list);
2943 TAILQ_INIT(&ixgbe_flow_list);
2947 ixgbe_filterlist_flush(void)
2949 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
2950 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
2951 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
2952 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
2953 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
2954 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
2955 struct ixgbe_rss_conf_ele *rss_filter_ptr;
2957 while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
2958 TAILQ_REMOVE(&filter_ntuple_list,
2961 rte_free(ntuple_filter_ptr);
2964 while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
2965 TAILQ_REMOVE(&filter_ethertype_list,
2966 ethertype_filter_ptr,
2968 rte_free(ethertype_filter_ptr);
2971 while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
2972 TAILQ_REMOVE(&filter_syn_list,
2975 rte_free(syn_filter_ptr);
2978 while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
2979 TAILQ_REMOVE(&filter_l2_tunnel_list,
2982 rte_free(l2_tn_filter_ptr);
2985 while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
2986 TAILQ_REMOVE(&filter_fdir_list,
2989 rte_free(fdir_rule_ptr);
2992 while ((rss_filter_ptr = TAILQ_FIRST(&filter_rss_list))) {
2993 TAILQ_REMOVE(&filter_rss_list,
2996 rte_free(rss_filter_ptr);
2999 while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
3000 TAILQ_REMOVE(&ixgbe_flow_list,
3003 rte_free(ixgbe_flow_mem_ptr->flow);
3004 rte_free(ixgbe_flow_mem_ptr);
3009 * Create or destroy a flow rule.
3010 * Theoretically one rule can match more than one filter.
3011 * We will let it use the first filter it hits.
3012 * So, the sequence matters.
3014 static struct rte_flow *
3015 ixgbe_flow_create(struct rte_eth_dev *dev,
3016 const struct rte_flow_attr *attr,
3017 const struct rte_flow_item pattern[],
3018 const struct rte_flow_action actions[],
3019 struct rte_flow_error *error)
3022 struct rte_eth_ntuple_filter ntuple_filter;
3023 struct rte_eth_ethertype_filter ethertype_filter;
3024 struct rte_eth_syn_filter syn_filter;
3025 struct ixgbe_fdir_rule fdir_rule;
3026 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3027 struct ixgbe_hw_fdir_info *fdir_info =
3028 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3029 struct ixgbe_rte_flow_rss_conf rss_conf;
3030 struct rte_flow *flow = NULL;
3031 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3032 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3033 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3034 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3035 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3036 struct ixgbe_rss_conf_ele *rss_filter_ptr;
3037 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3038 uint8_t first_mask = FALSE;
3040 flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
3042 PMD_DRV_LOG(ERR, "failed to allocate memory");
3043 return (struct rte_flow *)flow;
3045 ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
3046 sizeof(struct ixgbe_flow_mem), 0);
3047 if (!ixgbe_flow_mem_ptr) {
3048 PMD_DRV_LOG(ERR, "failed to allocate memory");
3052 ixgbe_flow_mem_ptr->flow = flow;
3053 TAILQ_INSERT_TAIL(&ixgbe_flow_list,
3054 ixgbe_flow_mem_ptr, entries);
3056 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3057 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3058 actions, &ntuple_filter, error);
3060 #ifdef RTE_LIBRTE_SECURITY
3061 /* An ESP flow is not really a flow. */
3062 if (ntuple_filter.proto == IPPROTO_ESP)
3067 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
3069 ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
3070 sizeof(struct ixgbe_ntuple_filter_ele), 0);
3071 if (!ntuple_filter_ptr) {
3072 PMD_DRV_LOG(ERR, "failed to allocate memory");
3075 rte_memcpy(&ntuple_filter_ptr->filter_info,
3077 sizeof(struct rte_eth_ntuple_filter));
3078 TAILQ_INSERT_TAIL(&filter_ntuple_list,
3079 ntuple_filter_ptr, entries);
3080 flow->rule = ntuple_filter_ptr;
3081 flow->filter_type = RTE_ETH_FILTER_NTUPLE;
3087 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3088 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3089 actions, &ethertype_filter, error);
3091 ret = ixgbe_add_del_ethertype_filter(dev,
3092 &ethertype_filter, TRUE);
3094 ethertype_filter_ptr = rte_zmalloc(
3095 "ixgbe_ethertype_filter",
3096 sizeof(struct ixgbe_ethertype_filter_ele), 0);
3097 if (!ethertype_filter_ptr) {
3098 PMD_DRV_LOG(ERR, "failed to allocate memory");
3101 rte_memcpy(&ethertype_filter_ptr->filter_info,
3103 sizeof(struct rte_eth_ethertype_filter));
3104 TAILQ_INSERT_TAIL(&filter_ethertype_list,
3105 ethertype_filter_ptr, entries);
3106 flow->rule = ethertype_filter_ptr;
3107 flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
3113 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3114 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3115 actions, &syn_filter, error);
3117 ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
3119 syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
3120 sizeof(struct ixgbe_eth_syn_filter_ele), 0);
3121 if (!syn_filter_ptr) {
3122 PMD_DRV_LOG(ERR, "failed to allocate memory");
3125 rte_memcpy(&syn_filter_ptr->filter_info,
3127 sizeof(struct rte_eth_syn_filter));
3128 TAILQ_INSERT_TAIL(&filter_syn_list,
3131 flow->rule = syn_filter_ptr;
3132 flow->filter_type = RTE_ETH_FILTER_SYN;
3138 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3139 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3140 actions, &fdir_rule, error);
3142 /* A mask cannot be deleted. */
3143 if (fdir_rule.b_mask) {
3144 if (!fdir_info->mask_added) {
3145 /* It's the first time the mask is set. */
3146 rte_memcpy(&fdir_info->mask,
3148 sizeof(struct ixgbe_hw_fdir_mask));
3149 fdir_info->flex_bytes_offset =
3150 fdir_rule.flex_bytes_offset;
3152 if (fdir_rule.mask.flex_bytes_mask)
3153 ixgbe_fdir_set_flexbytes_offset(dev,
3154 fdir_rule.flex_bytes_offset);
3156 ret = ixgbe_fdir_set_input_mask(dev);
3160 fdir_info->mask_added = TRUE;
3164 * Only one global mask is supported,
3165 * all the masks should be the same.
3167 ret = memcmp(&fdir_info->mask,
3169 sizeof(struct ixgbe_hw_fdir_mask));
3173 if (fdir_info->flex_bytes_offset !=
3174 fdir_rule.flex_bytes_offset)
3179 if (fdir_rule.b_spec) {
3180 ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
3183 fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
3184 sizeof(struct ixgbe_fdir_rule_ele), 0);
3185 if (!fdir_rule_ptr) {
3186 PMD_DRV_LOG(ERR, "failed to allocate memory");
3189 rte_memcpy(&fdir_rule_ptr->filter_info,
3191 sizeof(struct ixgbe_fdir_rule));
3192 TAILQ_INSERT_TAIL(&filter_fdir_list,
3193 fdir_rule_ptr, entries);
3194 flow->rule = fdir_rule_ptr;
3195 flow->filter_type = RTE_ETH_FILTER_FDIR;
3202 * Clear the mask_added flag if we fail to
3206 fdir_info->mask_added = FALSE;
3214 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3215 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3216 actions, &l2_tn_filter, error);
3218 ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
3220 l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
3221 sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
3222 if (!l2_tn_filter_ptr) {
3223 PMD_DRV_LOG(ERR, "failed to allocate memory");
3226 rte_memcpy(&l2_tn_filter_ptr->filter_info,
3228 sizeof(struct rte_eth_l2_tunnel_conf));
3229 TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
3230 l2_tn_filter_ptr, entries);
3231 flow->rule = l2_tn_filter_ptr;
3232 flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
3237 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3238 ret = ixgbe_parse_rss_filter(dev, attr,
3239 actions, &rss_conf, error);
3241 ret = ixgbe_config_rss_filter(dev, &rss_conf, TRUE);
3243 rss_filter_ptr = rte_zmalloc("ixgbe_rss_filter",
3244 sizeof(struct ixgbe_rss_conf_ele), 0);
3245 if (!rss_filter_ptr) {
3246 PMD_DRV_LOG(ERR, "failed to allocate memory");
3249 ixgbe_rss_conf_init(&rss_filter_ptr->filter_info,
3251 TAILQ_INSERT_TAIL(&filter_rss_list,
3252 rss_filter_ptr, entries);
3253 flow->rule = rss_filter_ptr;
3254 flow->filter_type = RTE_ETH_FILTER_HASH;
3260 TAILQ_REMOVE(&ixgbe_flow_list,
3261 ixgbe_flow_mem_ptr, entries);
3262 rte_flow_error_set(error, -ret,
3263 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
3264 "Failed to create flow.");
3265 rte_free(ixgbe_flow_mem_ptr);
3271 * Check if the flow rule is supported by ixgbe.
3272 * It only checks the format. It does not guarantee that the rule can be
3273 * programmed into the HW, because there may not be enough room for it.
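 *
 * A minimal application-side sketch of the intended usage (illustrative
 * only; port_id, attr, pattern, and actions are placeholders defined by the
 * application):
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow != NULL)
 *		rte_flow_destroy(port_id, flow, &err);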
3276 ixgbe_flow_validate(struct rte_eth_dev *dev,
3277 const struct rte_flow_attr *attr,
3278 const struct rte_flow_item pattern[],
3279 const struct rte_flow_action actions[],
3280 struct rte_flow_error *error)
3282 struct rte_eth_ntuple_filter ntuple_filter;
3283 struct rte_eth_ethertype_filter ethertype_filter;
3284 struct rte_eth_syn_filter syn_filter;
3285 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3286 struct ixgbe_fdir_rule fdir_rule;
3287 struct ixgbe_rte_flow_rss_conf rss_conf;
3290 memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
3291 ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
3292 actions, &ntuple_filter, error);
3296 memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
3297 ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
3298 actions, &ethertype_filter, error);
3302 memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
3303 ret = ixgbe_parse_syn_filter(dev, attr, pattern,
3304 actions, &syn_filter, error);
3308 memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
3309 ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
3310 actions, &fdir_rule, error);
3314 memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
3315 ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
3316 actions, &l2_tn_filter, error);
3320 memset(&rss_conf, 0, sizeof(struct ixgbe_rte_flow_rss_conf));
3321 ret = ixgbe_parse_rss_filter(dev, attr,
3322 actions, &rss_conf, error);
3327 /* Destroy a flow rule on ixgbe. */
3329 ixgbe_flow_destroy(struct rte_eth_dev *dev,
3330 struct rte_flow *flow,
3331 struct rte_flow_error *error)
3334 struct rte_flow *pmd_flow = flow;
3335 enum rte_filter_type filter_type = pmd_flow->filter_type;
3336 struct rte_eth_ntuple_filter ntuple_filter;
3337 struct rte_eth_ethertype_filter ethertype_filter;
3338 struct rte_eth_syn_filter syn_filter;
3339 struct ixgbe_fdir_rule fdir_rule;
3340 struct rte_eth_l2_tunnel_conf l2_tn_filter;
3341 struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
3342 struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
3343 struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
3344 struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
3345 struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
3346 struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
3347 struct ixgbe_hw_fdir_info *fdir_info =
3348 IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
3349 struct ixgbe_rss_conf_ele *rss_filter_ptr;
3351 switch (filter_type) {
3352 case RTE_ETH_FILTER_NTUPLE:
3353 ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
3355 rte_memcpy(&ntuple_filter,
3356 &ntuple_filter_ptr->filter_info,
3357 sizeof(struct rte_eth_ntuple_filter));
3358 ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
3360 TAILQ_REMOVE(&filter_ntuple_list,
3361 ntuple_filter_ptr, entries);
3362 rte_free(ntuple_filter_ptr);
3365 case RTE_ETH_FILTER_ETHERTYPE:
3366 ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
3368 rte_memcpy(&ethertype_filter,
3369 &ethertype_filter_ptr->filter_info,
3370 sizeof(struct rte_eth_ethertype_filter));
3371 ret = ixgbe_add_del_ethertype_filter(dev,
3372 &ethertype_filter, FALSE);
3374 TAILQ_REMOVE(&filter_ethertype_list,
3375 ethertype_filter_ptr, entries);
3376 rte_free(ethertype_filter_ptr);
3379 case RTE_ETH_FILTER_SYN:
3380 syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
3382 rte_memcpy(&syn_filter,
3383 &syn_filter_ptr->filter_info,
3384 sizeof(struct rte_eth_syn_filter));
3385 ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
3387 TAILQ_REMOVE(&filter_syn_list,
3388 syn_filter_ptr, entries);
3389 rte_free(syn_filter_ptr);
3392 case RTE_ETH_FILTER_FDIR:
3393 fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
3394 rte_memcpy(&fdir_rule,
3395 &fdir_rule_ptr->filter_info,
3396 sizeof(struct ixgbe_fdir_rule));
3397 ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
3399 TAILQ_REMOVE(&filter_fdir_list,
3400 fdir_rule_ptr, entries);
3401 rte_free(fdir_rule_ptr);
3402 if (TAILQ_EMPTY(&filter_fdir_list))
3403 fdir_info->mask_added = false;
3406 case RTE_ETH_FILTER_L2_TUNNEL:
3407 l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
3409 rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
3410 sizeof(struct rte_eth_l2_tunnel_conf));
3411 ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
3413 TAILQ_REMOVE(&filter_l2_tunnel_list,
3414 l2_tn_filter_ptr, entries);
3415 rte_free(l2_tn_filter_ptr);
3418 case RTE_ETH_FILTER_HASH:
3419 rss_filter_ptr = (struct ixgbe_rss_conf_ele *)
3421 ret = ixgbe_config_rss_filter(dev,
3422 &rss_filter_ptr->filter_info, FALSE);
3424 TAILQ_REMOVE(&filter_rss_list,
3425 rss_filter_ptr, entries);
3426 rte_free(rss_filter_ptr);
3430 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
3437 rte_flow_error_set(error, EINVAL,
3438 RTE_FLOW_ERROR_TYPE_HANDLE,
3439 NULL, "Failed to destroy flow");
3443 TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
3444 if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
3445 TAILQ_REMOVE(&ixgbe_flow_list,
3446 ixgbe_flow_mem_ptr, entries);
3447 rte_free(ixgbe_flow_mem_ptr);
3455 /* Destroy all flow rules associated with a port on ixgbe. */
3457 ixgbe_flow_flush(struct rte_eth_dev *dev,
3458 struct rte_flow_error *error)
3462 ixgbe_clear_all_ntuple_filter(dev);
3463 ixgbe_clear_all_ethertype_filter(dev);
3464 ixgbe_clear_syn_filter(dev);
3466 ret = ixgbe_clear_all_fdir_filter(dev);
3468 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3469 NULL, "Failed to flush rule");
3473 ret = ixgbe_clear_all_l2_tn_filter(dev);
3475 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
3476 NULL, "Failed to flush rule");
3480 ixgbe_clear_rss_filter(dev);
3482 ixgbe_filterlist_flush();
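/*
 * Illustrative sketch only (generic rte_flow API, placeholder port_id): an
 * application removes every rule created through these ops, for instance
 * before stopping the port, with a single call that lands in
 * ixgbe_flow_flush():
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flow flush failed: %s\n",
 *		       err.message ? err.message : "(no message)");
 */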
3487 const struct rte_flow_ops ixgbe_flow_ops = {
3488 .validate = ixgbe_flow_validate,
3489 .create = ixgbe_flow_create,
3490 .destroy = ixgbe_flow_destroy,
3491 .flush = ixgbe_flow_flush,