/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <netinet/in.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_interrupts.h>
#include <rte_debug.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_alarm.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
#include <rte_random.h>
#include <rte_hash_crc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>

#include "ixgbe_logs.h"
#include "base/ixgbe_api.h"
#include "base/ixgbe_vf.h"
#include "base/ixgbe_common.h"
#include "ixgbe_ethdev.h"
#include "ixgbe_bypass.h"
#include "ixgbe_rxtx.h"
#include "base/ixgbe_type.h"
#include "base/ixgbe_phy.h"
#include "rte_pmd_ixgbe.h"

#define IXGBE_MIN_N_TUPLE_PRIO 1
#define IXGBE_MAX_N_TUPLE_PRIO 7
#define IXGBE_MAX_FLX_SOURCE_OFF 62

/* ntuple filter list structure */
struct ixgbe_ntuple_filter_ele {
    TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
    struct rte_eth_ntuple_filter filter_info;
};

/* ethertype filter list structure */
struct ixgbe_ethertype_filter_ele {
    TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
    struct rte_eth_ethertype_filter filter_info;
};

/* syn filter list structure */
struct ixgbe_eth_syn_filter_ele {
    TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
    struct rte_eth_syn_filter filter_info;
};

/* fdir filter list structure */
struct ixgbe_fdir_rule_ele {
    TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
    struct ixgbe_fdir_rule filter_info;
};

/* l2_tunnel filter list structure */
struct ixgbe_eth_l2_tunnel_conf_ele {
    TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
    struct rte_eth_l2_tunnel_conf filter_info;
};

/* ixgbe_flow memory list structure */
struct ixgbe_flow_mem {
    TAILQ_ENTRY(ixgbe_flow_mem) entries;
    struct rte_flow *flow;
};

TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);

static struct ixgbe_ntuple_filter_list filter_ntuple_list;
static struct ixgbe_ethertype_filter_list filter_ethertype_list;
static struct ixgbe_syn_filter_list filter_syn_list;
static struct ixgbe_fdir_rule_filter_list filter_fdir_list;
static struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
static struct ixgbe_flow_mem_list ixgbe_flow_list;
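
/*
 * Illustrative sketch only, not part of the driver proper: every rte_flow
 * created by this PMD is wrapped in an ixgbe_flow_mem node and appended to
 * ixgbe_flow_list, so a later flush can walk the list and release every
 * flow. The helper name below is hypothetical.
 */
static int
ixgbe_flow_list_add_example(struct rte_flow *flow)
{
    struct ixgbe_flow_mem *node;

    /* one bookkeeping node per flow, zeroed so 'entries' starts clean */
    node = rte_zmalloc("ixgbe_flow_mem", sizeof(*node), 0);
    if (!node)
        return -ENOMEM;

    node->flow = flow;
    TAILQ_INSERT_TAIL(&ixgbe_flow_list, node, entries);
    return 0;
}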

/**
 * An endless loop can never happen with the two assumptions below:
 * 1. there is at least one no-void item (END).
 * 2. cur is before END.
 */
static inline
const struct rte_flow_item *next_no_void_pattern(
        const struct rte_flow_item pattern[],
        const struct rte_flow_item *cur)
{
    const struct rte_flow_item *next =
        cur ? cur + 1 : &pattern[0];

    while (1) {
        if (next->type != RTE_FLOW_ITEM_TYPE_VOID)
            return next;
        next++;
    }
}

static inline
const struct rte_flow_action *next_no_void_action(
        const struct rte_flow_action actions[],
        const struct rte_flow_action *cur)
{
    const struct rte_flow_action *next =
        cur ? cur + 1 : &actions[0];

    while (1) {
        if (next->type != RTE_FLOW_ACTION_TYPE_VOID)
            return next;
        next++;
    }
}
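
/*
 * Illustrative only (hypothetical helper, not in the driver): the parsers
 * walk patterns with next_no_void_pattern(), so any number of VOID items
 * may sit between the items that actually matter.
 */
static uint32_t
ixgbe_count_pattern_items_example(const struct rte_flow_item pattern[])
{
    const struct rte_flow_item *item = next_no_void_pattern(pattern, NULL);
    uint32_t n = 0;

    while (item->type != RTE_FLOW_ITEM_TYPE_END) {
        n++;
        item = next_no_void_pattern(pattern, item);
    }
    return n; /* number of non-void, non-END items */
}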

/**
 * Please be aware there's an assumption for all the parsers:
 * rte_flow_item uses big endian, while rte_flow_attr and
 * rte_flow_action use CPU order.
 * Because the pattern is used to describe the packets,
 * the packets should normally use network order.
 */
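
/*
 * A minimal sketch of that convention (illustrative only, hypothetical
 * helper name): pattern fields are written in network order, while
 * action/attr fields stay in CPU order.
 */
static void
ixgbe_flow_endianness_example(struct rte_flow_item_ipv4 *ipv4_spec,
              struct rte_flow_action_queue *queue_conf)
{
    /* pattern side: big endian; 0xC0A80114 is 192.168.1.20 */
    ipv4_spec->hdr.src_addr = rte_cpu_to_be_32(0xC0A80114);
    /* action side: plain CPU order */
    queue_conf->index = 3;
}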

/**
 * Parse the rule to see if it is an n-tuple rule.
 * And get the n-tuple filter info BTW.
 * pattern:
 * The first not void item can be ETH or IPV4.
 * The second not void item must be IPV4 if the first one is ETH.
 * The third not void item must be UDP or TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 *              next_proto_id   17      0xFF
 * UDP/TCP/     src_port        80      0xFFFF
 * SCTP         dst_port        80      0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ntuple_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_ntuple_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec;
    const struct rte_flow_item_sctp *sctp_mask;

    if (!pattern) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* the first not void item can be MAC or IPv4 */
    item = next_no_void_pattern(pattern, NULL);

    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }
    /* Skip Ethernet */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error,
                EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* if the first item is MAC, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
        /* check if the next not void item is IPv4 */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
            rte_flow_error_set(error,
                EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }
    }

    /* get the IPv4 info */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }
    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    ipv4_mask = (const struct rte_flow_item_ipv4 *)item->mask;
    /**
     * Only support src & dst addresses and protocol;
     * others should be masked.
     */
    if (ipv4_mask->hdr.version_ihl ||
        ipv4_mask->hdr.type_of_service ||
        ipv4_mask->hdr.total_length ||
        ipv4_mask->hdr.packet_id ||
        ipv4_mask->hdr.fragment_offset ||
        ipv4_mask->hdr.time_to_live ||
        ipv4_mask->hdr.hdr_checksum) {
        rte_flow_error_set(error,
            EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    filter->dst_ip_mask = ipv4_mask->hdr.dst_addr;
    filter->src_ip_mask = ipv4_mask->hdr.src_addr;
    filter->proto_mask = ipv4_mask->hdr.next_proto_id;

    ipv4_spec = (const struct rte_flow_item_ipv4 *)item->spec;
    filter->dst_ip = ipv4_spec->hdr.dst_addr;
    filter->src_ip = ipv4_spec->hdr.src_addr;
    filter->proto = ipv4_spec->hdr.next_proto_id;

    /* check if the next not void item is TCP or UDP */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
        item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* get the TCP/UDP/SCTP info */
    if ((item->type != RTE_FLOW_ITEM_TYPE_END) &&
        (!item->spec || !item->mask)) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ntuple mask");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

        /**
         * Only support src & dst ports and tcp flags;
         * others should be masked.
         */
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = tcp_mask->hdr.dst_port;
        filter->src_port_mask = tcp_mask->hdr.src_port;
        if (tcp_mask->hdr.tcp_flags == 0xFF) {
            filter->flags |= RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else if (!tcp_mask->hdr.tcp_flags) {
            filter->flags &= ~RTE_NTUPLE_FLAGS_TCP_FLAG;
        } else {
            memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
        filter->dst_port = tcp_spec->hdr.dst_port;
        filter->src_port = tcp_spec->hdr.src_port;
        filter->tcp_flags = tcp_spec->hdr.tcp_flags;
    } else if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        udp_mask = (const struct rte_flow_item_udp *)item->mask;

        /**
         * Only support src & dst ports;
         * others should be masked.
         */
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = udp_mask->hdr.dst_port;
        filter->src_port_mask = udp_mask->hdr.src_port;

        udp_spec = (const struct rte_flow_item_udp *)item->spec;
        filter->dst_port = udp_spec->hdr.dst_port;
        filter->src_port = udp_spec->hdr.src_port;
    } else if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        sctp_mask = (const struct rte_flow_item_sctp *)item->mask;

        /**
         * Only support src & dst ports;
         * others should be masked.
         */
        if (sctp_mask->hdr.tag ||
            sctp_mask->hdr.cksum) {
            memset(filter, 0,
                sizeof(struct rte_eth_ntuple_filter));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by ntuple filter");
            return -rte_errno;
        }

        filter->dst_port_mask = sctp_mask->hdr.dst_port;
        filter->src_port_mask = sctp_mask->hdr.src_port;

        sctp_spec = (const struct rte_flow_item_sctp *)item->spec;
        filter->dst_port = sctp_spec->hdr.dst_port;
        filter->src_port = sctp_spec->hdr.src_port;
    }

    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /**
     * n-tuple only supports forwarding,
     * check if the first not void action is QUEUE.
     */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }
    filter->queue =
        ((const struct rte_flow_action_queue *)act->conf)->index;

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    if (attr->priority > 0xFFFF) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Error priority.");
        return -rte_errno;
    }
    filter->priority = (uint16_t)attr->priority;
    if (attr->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        attr->priority > IXGBE_MAX_N_TUPLE_PRIO)
        filter->priority = 1;

    return 0;
}
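
/*
 * Usage sketch for the pattern documented above (illustrative only, the
 * helper name is hypothetical): ETH -> IPV4 -> UDP -> END with a QUEUE
 * action, exactly the shape cons_parse_ntuple_filter() accepts.
 */
static int
ixgbe_ntuple_flow_example(struct rte_eth_ntuple_filter *filter,
              struct rte_flow_error *error)
{
    struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
        .next_proto_id = 17,                      /* UDP */
    } };
    struct rte_flow_item_ipv4 ipv4_mask = { .hdr = {
        .src_addr = 0xFFFFFFFF,
        .dst_addr = 0xFFFFFFFF,
        .next_proto_id = 0xFF,
    } };
    struct rte_flow_item_udp udp_spec = { .hdr = {
        .src_port = rte_cpu_to_be_16(80),
        .dst_port = rte_cpu_to_be_16(80),
    } };
    struct rte_flow_item_udp udp_mask = { .hdr = {
        .src_port = 0xFFFF,
        .dst_port = 0xFFFF,
    } };
    struct rte_flow_action_queue queue = { .index = 1 };
    const struct rte_flow_attr attr = { .ingress = 1, .priority = 1 };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ipv4_spec, .mask = &ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return cons_parse_ntuple_filter(&attr, pattern, actions, filter, error);
}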

/* a specific function for ixgbe because the flags are specific */
static int
ixgbe_parse_ntuple_filter(struct rte_eth_dev *dev,
              const struct rte_flow_attr *attr,
              const struct rte_flow_item pattern[],
              const struct rte_flow_action actions[],
              struct rte_eth_ntuple_filter *filter,
              struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP_EXT(hw->mac.type);

    ret = cons_parse_ntuple_filter(attr, pattern, actions, filter, error);
    if (ret)
        return ret;

    /* ixgbe doesn't support tcp flags */
    if (filter->flags & RTE_NTUPLE_FLAGS_TCP_FLAG) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ntuple filter");
        return -rte_errno;
    }

    /* ixgbe doesn't support many priorities */
    if (filter->priority < IXGBE_MIN_N_TUPLE_PRIO ||
        filter->priority > IXGBE_MAX_N_TUPLE_PRIO) {
        memset(filter, 0, sizeof(struct rte_eth_ntuple_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Priority not supported by ntuple filter");
        return -rte_errno;
    }

    if (filter->queue >= dev->data->nb_rx_queues)
        return -rte_errno;

    /* fixed value for ixgbe */
    filter->flags = RTE_5TUPLE_FLAGS;
    return 0;
}

/**
 * Parse the rule to see if it is an ethertype rule.
 * And get the ethertype filter info BTW.
 * pattern:
 * The first not void item can be ETH.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          type    0x0807          0xFFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_ethertype_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item *pattern,
            const struct rte_flow_action *actions,
            struct rte_eth_ethertype_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_action_queue *act_q;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    item = next_no_void_pattern(pattern, NULL);
    /* The first non-void item should be MAC. */
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Get the MAC info. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter");
        return -rte_errno;
    }

    eth_spec = (const struct rte_flow_item_eth *)item->spec;
    eth_mask = (const struct rte_flow_item_eth *)item->mask;

    /* Mask bits of source MAC address must be full of 0.
     * Mask bits of destination MAC address must be full
     * of 1 or full of 0.
     */
    if (!is_zero_ether_addr(&eth_mask->src) ||
        (!is_zero_ether_addr(&eth_mask->dst) &&
         !is_broadcast_ether_addr(&eth_mask->dst))) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ether address mask");
        return -rte_errno;
    }

    if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid ethertype mask");
        return -rte_errno;
    }

    /* If mask bits of destination MAC address
     * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
     */
    if (is_broadcast_ether_addr(&eth_mask->dst)) {
        filter->mac_addr = eth_spec->dst;
        filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
    } else {
        filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
    }
    filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

    /* Check if the next non-void item is END. */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by ethertype filter.");
        return -rte_errno;
    }

    /* Parse action */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        filter->queue = act_q->index;
    } else {
        filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
    }

    /* Check if the next non-void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* Parse attr */
    /* Must be input direction */
    if (!attr->ingress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->egress) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->priority) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* Not supported */
    if (attr->group) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
            attr, "Not support group.");
        return -rte_errno;
    }

    return 0;
}

static int
ixgbe_parse_ethertype_filter(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_eth_ethertype_filter *filter,
                 struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP(hw->mac.type);

    ret = cons_parse_ethertype_filter(attr, pattern,
                    actions, filter, error);
    if (ret)
        return ret;

    /* ixgbe doesn't support MAC address */
    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->queue >= dev->data->nb_rx_queues) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "queue index much too big");
        return -rte_errno;
    }

    if (filter->ether_type == ETHER_TYPE_IPv4 ||
        filter->ether_type == ETHER_TYPE_IPv6) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "IPv4/IPv6 not supported by ethertype filter");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_MAC) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "mac compare is unsupported");
        return -rte_errno;
    }

    if (filter->flags & RTE_ETHTYPE_FLAGS_DROP) {
        memset(filter, 0, sizeof(struct rte_eth_ethertype_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "drop option is unsupported");
        return -rte_errno;
    }

    return 0;
}
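
/*
 * Usage sketch for the ethertype parser above (illustrative only,
 * hypothetical helper name): match ethertype 0x0807 and steer it to
 * queue 0. The dst MAC mask stays all-zero so the unsupported MAC
 * compare flag is never set.
 */
static int
ixgbe_ethertype_flow_example(struct rte_eth_dev *dev,
                 struct rte_eth_ethertype_filter *filter,
                 struct rte_flow_error *error)
{
    struct rte_flow_item_eth eth_spec = {
        .type = rte_cpu_to_be_16(0x0807),
    };
    struct rte_flow_item_eth eth_mask = {
        .type = 0xFFFF, /* the ethertype must be fully masked */
    };
    struct rte_flow_action_queue queue = { .index = 0 };
    const struct rte_flow_attr attr = { .ingress = 1 };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH,
          .spec = &eth_spec, .mask = &eth_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return ixgbe_parse_ethertype_filter(dev, &attr, pattern, actions,
                        filter, error);
}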

/**
 * Parse the rule to see if it is a TCP SYN rule.
 * And get the TCP SYN filter info BTW.
 * pattern:
 * The first not void item must be ETH.
 * The second not void item must be IPV4 or IPV6.
 * The third not void item must be TCP.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * TCP          tcp_flags       0x02    0xFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_syn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_syn_filter *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_action *act;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_action_queue *act_q;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* the first not void item should be MAC or IPv4 or IPv6 or TCP */
    item = next_no_void_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }
    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Skip Ethernet */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /* if the item is MAC, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN address mask");
            return -rte_errno;
        }

        /* check if the next not void item is IPv4 or IPv6 */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
            item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    /* Skip IP */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
        item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /* if the item is IP, the content should be NULL */
        if (item->spec || item->mask) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Invalid SYN mask");
            return -rte_errno;
        }

        /* check if the next not void item is TCP */
        item = next_no_void_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by syn filter");
            return -rte_errno;
        }
    }

    /* Get the TCP info. Only support SYN. */
    if (!item->spec || !item->mask) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Invalid SYN mask");
        return -rte_errno;
    }
    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
    tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
    if (!(tcp_spec->hdr.tcp_flags & TCP_SYN_FLAG) ||
        tcp_mask->hdr.src_port ||
        tcp_mask->hdr.dst_port ||
        tcp_mask->hdr.sent_seq ||
        tcp_mask->hdr.recv_ack ||
        tcp_mask->hdr.data_off ||
        tcp_mask->hdr.tcp_flags != TCP_SYN_FLAG ||
        tcp_mask->hdr.rx_win ||
        tcp_mask->hdr.cksum ||
        tcp_mask->hdr.tcp_urp) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by syn filter");
        return -rte_errno;
    }

    /* check if the first not void action is QUEUE. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    act_q = (const struct rte_flow_action_queue *)act->conf;
    filter->queue = act_q->index;
    if (filter->queue >= IXGBE_MAX_RX_QUEUE_NUM) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* Support 2 priorities, the lowest or highest. */
    if (!attr->priority) {
        filter->hig_pri = 0;
    } else if (attr->priority == (uint32_t)~0U) {
        filter->hig_pri = 1;
    } else {
        memset(filter, 0, sizeof(struct rte_eth_syn_filter));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    return 0;
}

static int
ixgbe_parse_syn_filter(struct rte_eth_dev *dev,
               const struct rte_flow_attr *attr,
               const struct rte_flow_item pattern[],
               const struct rte_flow_action actions[],
               struct rte_eth_syn_filter *filter,
               struct rte_flow_error *error)
{
    int ret;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    MAC_TYPE_FILTER_SUP(hw->mac.type);

    ret = cons_parse_syn_filter(attr, pattern,
                    actions, filter, error);

    if (filter->queue >= dev->data->nb_rx_queues)
        return -rte_errno;

    if (ret)
        return ret;

    return 0;
}
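
/*
 * Usage sketch for the SYN parser above (illustrative only, hypothetical
 * helper name): ETH and IPV4 items carry no spec/mask, the TCP item pins
 * exactly the SYN flag, and priority 0 requests the low-priority slot.
 */
static int
ixgbe_syn_flow_example(struct rte_eth_dev *dev,
               struct rte_eth_syn_filter *filter,
               struct rte_flow_error *error)
{
    struct rte_flow_item_tcp tcp_spec = {
        .hdr = { .tcp_flags = TCP_SYN_FLAG },
    };
    struct rte_flow_item_tcp tcp_mask = {
        .hdr = { .tcp_flags = TCP_SYN_FLAG },
    };
    struct rte_flow_action_queue queue = { .index = 0 };
    const struct rte_flow_attr attr = { .ingress = 1 };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_TCP,
          .spec = &tcp_spec, .mask = &tcp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return ixgbe_parse_syn_filter(dev, &attr, pattern, actions,
                      filter, error);
}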

/**
 * Parse the rule to see if it is an L2 tunnel rule.
 * And get the L2 tunnel filter info BTW.
 * Only E-tag is supported now.
 * pattern:
 * The first not void item can be E_TAG.
 * The next not void item must be END.
 * action:
 * The first not void action should be QUEUE.
 * The next not void action should be END.
 * pattern example:
 * ITEM         Spec                    Mask
 * E_TAG        grp             0x2     0x3
 *              e_cid_base      0x309   0xFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
cons_parse_l2_tn_filter(const struct rte_flow_attr *attr,
            const struct rte_flow_item pattern[],
            const struct rte_flow_action actions[],
            struct rte_eth_l2_tunnel_conf *filter,
            struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_e_tag *e_tag_spec;
    const struct rte_flow_item_e_tag *e_tag_mask;
    const struct rte_flow_action *act;
    const struct rte_flow_action_queue *act_q;

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /* The first not void item should be e-tag. */
    item = next_no_void_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_E_TAG) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    if (!item->spec || !item->mask) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    e_tag_spec = (const struct rte_flow_item_e_tag *)item->spec;
    e_tag_mask = (const struct rte_flow_item_e_tag *)item->mask;

    /* Only care about GRP and E-CID base. */
    if (e_tag_mask->epcp_edei_in_ecid_b ||
        e_tag_mask->in_ecid_e ||
        e_tag_mask->ecid_e ||
        e_tag_mask->rsvd_grp_ecid_b != rte_cpu_to_be_16(0x3FFF)) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    filter->l2_tunnel_type = RTE_L2_TUNNEL_TYPE_E_TAG;
    /**
     * grp and e_cid_base are bit fields and only use 14 bits.
     * The e-tag id is taken as little endian by HW.
     */
    filter->tunnel_id = rte_be_to_cpu_16(e_tag_spec->rsvd_grp_ecid_b);

    /* check if the next not void item is END */
    item = next_no_void_pattern(pattern, item);
    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* check if the first not void action is QUEUE. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    act_q = (const struct rte_flow_action_queue *)act->conf;
    filter->pool = act_q->index;

    /* check if the next not void item is END */
    act = next_no_void_action(actions, act);
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}

static int
ixgbe_parse_l2_tn_filter(struct rte_eth_dev *dev,
             const struct rte_flow_attr *attr,
             const struct rte_flow_item pattern[],
             const struct rte_flow_action actions[],
             struct rte_eth_l2_tunnel_conf *l2_tn_filter,
             struct rte_flow_error *error)
{
    int ret = 0;
    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    ret = cons_parse_l2_tn_filter(attr, pattern,
                actions, l2_tn_filter, error);

    if (hw->mac.type != ixgbe_mac_X550 &&
        hw->mac.type != ixgbe_mac_X550EM_x &&
        hw->mac.type != ixgbe_mac_X550EM_a) {
        memset(l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            NULL, "Not supported by L2 tunnel filter");
        return -rte_errno;
    }

    if (l2_tn_filter->pool >= dev->data->nb_rx_queues)
        return -rte_errno;

    return ret;
}
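
/*
 * Usage sketch for the E-tag parser above (illustrative only, hypothetical
 * helper name): GRP and e_cid_base travel together in rsvd_grp_ecid_b,
 * whose 14 valid bits must be fully masked (0x3FFF).
 */
static int
ixgbe_l2_tunnel_flow_example(struct rte_eth_dev *dev,
                 struct rte_eth_l2_tunnel_conf *l2_tn_filter,
                 struct rte_flow_error *error)
{
    struct rte_flow_item_e_tag e_tag_spec = {
        .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x309), /* e_cid_base 0x309 */
    };
    struct rte_flow_item_e_tag e_tag_mask = {
        .rsvd_grp_ecid_b = rte_cpu_to_be_16(0x3FFF),
    };
    struct rte_flow_action_queue queue = { .index = 0 };
    const struct rte_flow_attr attr = { .ingress = 1 };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_E_TAG,
          .spec = &e_tag_spec, .mask = &e_tag_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return ixgbe_parse_l2_tn_filter(dev, &attr, pattern, actions,
                    l2_tn_filter, error);
}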

/* Parse to get the attr and action info of a flow director rule. */
static int
ixgbe_parse_fdir_act_attr(const struct rte_flow_attr *attr,
              const struct rte_flow_action actions[],
              struct ixgbe_fdir_rule *rule,
              struct rte_flow_error *error)
{
    const struct rte_flow_action *act;
    const struct rte_flow_action_queue *act_q;
    const struct rte_flow_action_mark *mark;

    /* parse attr */
    /* must be input direction */
    if (!attr->ingress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
            attr, "Only support ingress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->egress) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
            attr, "Not support egress.");
        return -rte_errno;
    }

    /* not supported */
    if (attr->priority) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
            attr, "Not support priority.");
        return -rte_errno;
    }

    /* check if the first not void action is QUEUE or DROP. */
    act = next_no_void_action(actions, NULL);
    if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
        act->type != RTE_FLOW_ACTION_TYPE_DROP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
        act_q = (const struct rte_flow_action_queue *)act->conf;
        rule->queue = act_q->index;
    } else {
        /* signature mode does not support the drop action. */
        if (rule->mode == RTE_FDIR_MODE_SIGNATURE) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ACTION,
                act, "Not supported action.");
            return -rte_errno;
        }
        rule->fdirflags = IXGBE_FDIRCMD_DROP;
    }

    /* check if the next not void item is MARK */
    act = next_no_void_action(actions, act);
    if ((act->type != RTE_FLOW_ACTION_TYPE_MARK) &&
        (act->type != RTE_FLOW_ACTION_TYPE_END)) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
        mark = (const struct rte_flow_action_mark *)act->conf;
        rule->soft_id = mark->id;
        act = next_no_void_action(actions, act);
    }

    /* check if the next not void item is END */
    if (act->type != RTE_FLOW_ACTION_TYPE_END) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION,
            act, "Not supported action.");
        return -rte_errno;
    }

    return 0;
}
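
/*
 * Usage sketch for the action parser above (illustrative only,
 * hypothetical helper name): QUEUE -> MARK -> END steers matches to a
 * queue and tags them with a software id reported back in the mbuf.
 */
static int
ixgbe_fdir_mark_actions_example(const struct rte_flow_attr *attr,
                struct ixgbe_fdir_rule *rule,
                struct rte_flow_error *error)
{
    struct rte_flow_action_queue queue = { .index = 2 };
    struct rte_flow_action_mark mark = { .id = 0xbeef };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}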

/* search the next no-void pattern item and skip fuzzy */
static inline
const struct rte_flow_item *next_no_fuzzy_pattern(
        const struct rte_flow_item pattern[],
        const struct rte_flow_item *cur)
{
    const struct rte_flow_item *next =
        next_no_void_pattern(pattern, cur);

    while (1) {
        if (next->type != RTE_FLOW_ITEM_TYPE_FUZZY)
            return next;
        next = next_no_void_pattern(pattern, next);
    }
}

static inline uint8_t signature_match(const struct rte_flow_item pattern[])
{
    const struct rte_flow_item_fuzzy *spec, *last, *mask;
    const struct rte_flow_item *item;
    uint32_t sh, lh, mh;
    int i = 0;

    while (1) {
        item = pattern + i;
        if (item->type == RTE_FLOW_ITEM_TYPE_END)
            break;

        if (item->type == RTE_FLOW_ITEM_TYPE_FUZZY) {
            spec =
                (const struct rte_flow_item_fuzzy *)item->spec;
            last =
                (const struct rte_flow_item_fuzzy *)item->last;
            mask =
                (const struct rte_flow_item_fuzzy *)item->mask;

            if (!spec || !mask)
                return 0;

            sh = spec->thresh;
            if (!last)
                lh = sh;
            else
                lh = last->thresh;
            mh = mask->thresh;
            sh = sh & mh;
            lh = lh & mh;

            /* a non-zero masked threshold requests signature mode */
            if (!sh || sh > lh)
                return 0;

            return 1;
        }

        i++;
    }

    return 0;
}
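
/*
 * Illustrative only (hypothetical helper name): a FUZZY item with a
 * non-zero masked threshold is how an application requests signature
 * mode, and signature_match() above reports exactly that.
 */
static uint8_t
ixgbe_signature_mode_example(void)
{
    static const struct rte_flow_item_fuzzy fuzzy_spec = { .thresh = 1 };
    static const struct rte_flow_item_fuzzy fuzzy_mask = {
        .thresh = 0xFFFFFFFF,
    };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_FUZZY,
          .spec = &fuzzy_spec, .mask = &fuzzy_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };

    return signature_match(pattern); /* returns 1: signature mode */
}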

/**
 * Parse the rule to see if it is an IP or MAC VLAN flow director rule.
 * And get the flow director filter info BTW.
 * UDP/TCP/SCTP PATTERN:
 * The first not void item can be ETH or IPV4 or IPV6.
 * The second not void item must be IPV4 or IPV6 if the first one is ETH.
 * The next not void item could be UDP or TCP or SCTP (optional).
 * The next not void item could be RAW (for flexbyte, optional).
 * The next not void item must be END.
 * A Fuzzy Match pattern can appear at any place before END.
 * Fuzzy Match is optional for IPV4 but is required for IPV6.
 * MAC VLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be MAC VLAN.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * UDP/TCP/SCTP pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4         src_addr 192.168.1.20   0xFFFFFFFF
 *              dst_addr 192.167.3.50   0xFFFFFFFF
 * UDP/TCP/SCTP src_port        80      0xFFFF
 *              dst_port        80      0xFFFF
 * FLEX         relative        0       0x1
 *              search          0       0x1
 *              reserved        0       0
 *              offset          12      0xFFFFFFFF
 *              limit           0       0xFFFF
 *              length          2       0xFFFF
 *              pattern[0]      0x86    0xFF
 *              pattern[1]      0xDD    0xFF
 * END
 * MAC VLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          dst_addr
 *              {0xAC, 0x7B, 0xA1,      {0xFF, 0xFF, 0xFF,
 *               0x2C, 0x6D, 0x36}       0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * Other members in mask and spec should be set to 0x00.
 * Item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_normal(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct ixgbe_fdir_rule *rule,
                   struct rte_flow_error *error)
{
    const struct rte_flow_item *item;
    const struct rte_flow_item_eth *eth_spec;
    const struct rte_flow_item_eth *eth_mask;
    const struct rte_flow_item_ipv4 *ipv4_spec;
    const struct rte_flow_item_ipv4 *ipv4_mask;
    const struct rte_flow_item_ipv6 *ipv6_spec;
    const struct rte_flow_item_ipv6 *ipv6_mask;
    const struct rte_flow_item_tcp *tcp_spec;
    const struct rte_flow_item_tcp *tcp_mask;
    const struct rte_flow_item_udp *udp_spec;
    const struct rte_flow_item_udp *udp_mask;
    const struct rte_flow_item_sctp *sctp_spec;
    const struct rte_flow_item_sctp *sctp_mask;
    const struct rte_flow_item_raw *raw_mask;
    const struct rte_flow_item_raw *raw_spec;
    uint8_t j;

    struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

    if (!pattern) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM_NUM,
            NULL, "NULL pattern.");
        return -rte_errno;
    }

    if (!actions) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ACTION_NUM,
            NULL, "NULL action.");
        return -rte_errno;
    }

    if (!attr) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ATTR,
            NULL, "NULL attribute.");
        return -rte_errno;
    }

    /**
     * Some fields may not be provided. Set spec to 0 and mask to default
     * value. So, we need not do anything for the not provided fields later.
     */
    memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
    memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
    rule->mask.vlan_tci_mask = 0;
    rule->mask.flex_bytes_mask = 0;

    /**
     * The first not void item should be
     * MAC or IPv4 or TCP or UDP or SCTP.
     */
    item = next_no_fuzzy_pattern(pattern, NULL);
    if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
        item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
        item->type != RTE_FLOW_ITEM_TYPE_TCP &&
        item->type != RTE_FLOW_ITEM_TYPE_UDP &&
        item->type != RTE_FLOW_ITEM_TYPE_SCTP) {
        memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_ITEM,
            item, "Not supported by fdir filter");
        return -rte_errno;
    }

    if (signature_match(pattern))
        rule->mode = RTE_FDIR_MODE_SIGNATURE;
    else
        rule->mode = RTE_FDIR_MODE_PERFECT;

    /* Not supported last point for range */
    if (item->last) {
        rte_flow_error_set(error, EINVAL,
            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
            item, "Not supported last point for range");
        return -rte_errno;
    }

    /* Get the MAC info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
        /**
         * Only support vlan and dst MAC address;
         * others should be masked.
         */
        if (item->spec && !item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        if (item->spec) {
            rule->b_spec = TRUE;
            eth_spec = (const struct rte_flow_item_eth *)item->spec;

            /* Get the dst MAC. */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                rule->ixgbe_fdir.formatted.inner_mac[j] =
                    eth_spec->dst.addr_bytes[j];
            }
        }

        if (item->mask) {
            rule->b_mask = TRUE;
            eth_mask = (const struct rte_flow_item_eth *)item->mask;

            /* Ether type should be masked. */
            if (eth_mask->type ||
                rule->mode == RTE_FDIR_MODE_SIGNATURE) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }

            /* If ethernet has meaning, it means MAC VLAN mode. */
            rule->mode = RTE_FDIR_MODE_PERFECT_MAC_VLAN;

            /**
             * The src MAC address must be masked,
             * and the dst MAC address mask is not supported.
             */
            for (j = 0; j < ETHER_ADDR_LEN; j++) {
                if (eth_mask->src.addr_bytes[j] ||
                    eth_mask->dst.addr_bytes[j] != 0xFF) {
                    memset(rule, 0,
                        sizeof(struct ixgbe_fdir_rule));
                    rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM,
                        item, "Not supported by fdir filter");
                    return -rte_errno;
                }
            }

            /* When no VLAN, considered as full mask. */
            rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);
        }
        /*** If both spec and mask are NULL,
         * it means we don't care about ETH.
         * Do nothing.
         */

        /**
         * Check if the next not void item is vlan or ipv4.
         * IPv6 is not supported.
         */
        item = next_no_fuzzy_pattern(pattern, item);
        if (rule->mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
            if (item->type != RTE_FLOW_ITEM_TYPE_VLAN) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        } else {
            if (item->type != RTE_FLOW_ITEM_TYPE_IPV4) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }
    }

    /* Get the VLAN info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
        if (!(item->spec && item->mask)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
        vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

        rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

        rule->mask.vlan_tci_mask = vlan_mask->tci;
        rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
        /* More than one tag is not supported. */

        /* The next not void item must be END */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the IPV4 info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV4) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_IPV4;
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst addresses;
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        ipv4_mask =
            (const struct rte_flow_item_ipv4 *)item->mask;
        if (ipv4_mask->hdr.version_ihl ||
            ipv4_mask->hdr.type_of_service ||
            ipv4_mask->hdr.total_length ||
            ipv4_mask->hdr.packet_id ||
            ipv4_mask->hdr.fragment_offset ||
            ipv4_mask->hdr.time_to_live ||
            ipv4_mask->hdr.next_proto_id ||
            ipv4_mask->hdr.hdr_checksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.dst_ipv4_mask = ipv4_mask->hdr.dst_addr;
        rule->mask.src_ipv4_mask = ipv4_mask->hdr.src_addr;

        if (item->spec) {
            rule->b_spec = TRUE;
            ipv4_spec =
                (const struct rte_flow_item_ipv4 *)item->spec;
            rule->ixgbe_fdir.formatted.dst_ip[0] =
                ipv4_spec->hdr.dst_addr;
            rule->ixgbe_fdir.formatted.src_ip[0] =
                ipv4_spec->hdr.src_addr;
        }

        /**
         * Check if the next not void item is
         * TCP or UDP or SCTP or END.
         */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END &&
            item->type != RTE_FLOW_ITEM_TYPE_RAW) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the IPV6 info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type =
            IXGBE_ATR_FLOW_TYPE_IPV6;

        /**
         * 1. must signature match
         * 2. not support last
         * 3. mask must not be null
         */
        if (rule->mode != RTE_FDIR_MODE_SIGNATURE ||
            item->last ||
            !item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        rule->b_mask = TRUE;
        ipv6_mask =
            (const struct rte_flow_item_ipv6 *)item->mask;
        if (ipv6_mask->hdr.vtc_flow ||
            ipv6_mask->hdr.payload_len ||
            ipv6_mask->hdr.proto ||
            ipv6_mask->hdr.hop_limits) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* check the src addr mask */
        for (j = 0; j < 16; j++) {
            if (ipv6_mask->hdr.src_addr[j] == UINT8_MAX) {
                rule->mask.src_ipv6_mask |= 1 << j;
            } else if (ipv6_mask->hdr.src_addr[j] != 0) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        /* check the dst addr mask */
        for (j = 0; j < 16; j++) {
            if (ipv6_mask->hdr.dst_addr[j] == UINT8_MAX) {
                rule->mask.dst_ipv6_mask |= 1 << j;
            } else if (ipv6_mask->hdr.dst_addr[j] != 0) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        if (item->spec) {
            rule->b_spec = TRUE;
            ipv6_spec =
                (const struct rte_flow_item_ipv6 *)item->spec;
            rte_memcpy(rule->ixgbe_fdir.formatted.src_ip,
                   ipv6_spec->hdr.src_addr, 16);
            rte_memcpy(rule->ixgbe_fdir.formatted.dst_ip,
                   ipv6_spec->hdr.dst_addr, 16);
        }

        /**
         * Check if the next not void item is
         * TCP or UDP or SCTP or END.
         */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_TCP &&
            item->type != RTE_FLOW_ITEM_TYPE_UDP &&
            item->type != RTE_FLOW_ITEM_TYPE_SCTP &&
            item->type != RTE_FLOW_ITEM_TYPE_END &&
            item->type != RTE_FLOW_ITEM_TYPE_RAW) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the TCP info. */
    if (item->type == RTE_FLOW_ITEM_TYPE_TCP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_TCP;
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst ports;
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
        if (tcp_mask->hdr.sent_seq ||
            tcp_mask->hdr.recv_ack ||
            tcp_mask->hdr.data_off ||
            tcp_mask->hdr.tcp_flags ||
            tcp_mask->hdr.rx_win ||
            tcp_mask->hdr.cksum ||
            tcp_mask->hdr.tcp_urp) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = tcp_mask->hdr.src_port;
        rule->mask.dst_port_mask = tcp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                tcp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                tcp_spec->hdr.dst_port;
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the UDP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_UDP;
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /**
         * Only care about src & dst ports;
         * others should be masked.
         */
        if (!item->mask) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->b_mask = TRUE;
        udp_mask = (const struct rte_flow_item_udp *)item->mask;
        if (udp_mask->hdr.dgram_len ||
            udp_mask->hdr.dgram_cksum) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
        rule->mask.src_port_mask = udp_mask->hdr.src_port;
        rule->mask.dst_port_mask = udp_mask->hdr.dst_port;

        if (item->spec) {
            rule->b_spec = TRUE;
            udp_spec = (const struct rte_flow_item_udp *)item->spec;
            rule->ixgbe_fdir.formatted.src_port =
                udp_spec->hdr.src_port;
            rule->ixgbe_fdir.formatted.dst_port =
                udp_spec->hdr.dst_port;
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the SCTP info */
    if (item->type == RTE_FLOW_ITEM_TYPE_SCTP) {
        /**
         * Set the flow type even if there's no content
         * as we must have a flow type.
         */
        rule->ixgbe_fdir.formatted.flow_type |=
            IXGBE_ATR_L4TYPE_SCTP;
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }

        /* only the x550 family supports the SCTP port */
        if (hw->mac.type == ixgbe_mac_X550 ||
            hw->mac.type == ixgbe_mac_X550EM_x ||
            hw->mac.type == ixgbe_mac_X550EM_a) {
            /**
             * Only care about src & dst ports;
             * others should be masked.
             */
            if (!item->mask) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            rule->b_mask = TRUE;
            sctp_mask =
                (const struct rte_flow_item_sctp *)item->mask;
            if (sctp_mask->hdr.tag ||
                sctp_mask->hdr.cksum) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
            rule->mask.src_port_mask = sctp_mask->hdr.src_port;
            rule->mask.dst_port_mask = sctp_mask->hdr.dst_port;

            if (item->spec) {
                rule->b_spec = TRUE;
                sctp_spec =
                    (const struct rte_flow_item_sctp *)item->spec;
                rule->ixgbe_fdir.formatted.src_port =
                    sctp_spec->hdr.src_port;
                rule->ixgbe_fdir.formatted.dst_port =
                    sctp_spec->hdr.dst_port;
            }
        /* on other families even the sctp port is not supported */
        } else {
            sctp_mask =
                (const struct rte_flow_item_sctp *)item->mask;
            if (sctp_mask &&
                (sctp_mask->hdr.src_port ||
                 sctp_mask->hdr.dst_port ||
                 sctp_mask->hdr.tag ||
                 sctp_mask->hdr.cksum)) {
                memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
                rte_flow_error_set(error, EINVAL,
                    RTE_FLOW_ERROR_TYPE_ITEM,
                    item, "Not supported by fdir filter");
                return -rte_errno;
            }
        }

        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_RAW &&
            item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    /* Get the flex byte info */
    if (item->type == RTE_FLOW_ITEM_TYPE_RAW) {
        /* Not supported last point for range */
        if (item->last) {
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                item, "Not supported last point for range");
            return -rte_errno;
        }
        /* mask should not be null */
        if (!item->mask || !item->spec) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        raw_mask = (const struct rte_flow_item_raw *)item->mask;

        /* check mask */
        if (raw_mask->relative != 0x1 ||
            raw_mask->search != 0x1 ||
            raw_mask->reserved != 0x0 ||
            (uint32_t)raw_mask->offset != 0xffffffff ||
            raw_mask->limit != 0xffff ||
            raw_mask->length != 0xffff) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        raw_spec = (const struct rte_flow_item_raw *)item->spec;

        /* check spec */
        if (raw_spec->relative != 0 ||
            raw_spec->search != 0 ||
            raw_spec->reserved != 0 ||
            raw_spec->offset > IXGBE_MAX_FLX_SOURCE_OFF ||
            raw_spec->offset % 2 ||
            raw_spec->limit != 0 ||
            raw_spec->length != 2 ||
            /* pattern can't be 0xffff */
            (raw_spec->pattern[0] == 0xff &&
             raw_spec->pattern[1] == 0xff)) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        /* check pattern mask */
        if (raw_mask->pattern[0] != 0xff ||
            raw_mask->pattern[1] != 0xff) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }

        rule->mask.flex_bytes_mask = 0xffff;
        rule->ixgbe_fdir.formatted.flex_bytes =
            (((uint16_t)raw_spec->pattern[1]) << 8) |
            raw_spec->pattern[0];
        rule->flex_bytes_offset = raw_spec->offset;
    }

    if (item->type != RTE_FLOW_ITEM_TYPE_END) {
        /* check if the next not void item is END */
        item = next_no_fuzzy_pattern(pattern, item);
        if (item->type != RTE_FLOW_ITEM_TYPE_END) {
            memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
            rte_flow_error_set(error, EINVAL,
                RTE_FLOW_ERROR_TYPE_ITEM,
                item, "Not supported by fdir filter");
            return -rte_errno;
        }
    }

    return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
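
/*
 * Usage sketch for the parser above (illustrative only, hypothetical
 * helper name): a perfect-mode IPv4/UDP rule matching the documented
 * example addresses and ports, steered to queue 1.
 */
static int
ixgbe_fdir_ipv4_udp_example(struct rte_eth_dev *dev,
                struct ixgbe_fdir_rule *rule,
                struct rte_flow_error *error)
{
    struct rte_flow_item_ipv4 ipv4_spec = { .hdr = {
        .src_addr = rte_cpu_to_be_32(0xC0A80114), /* 192.168.1.20 */
        .dst_addr = rte_cpu_to_be_32(0xC0A70332), /* 192.167.3.50 */
    } };
    struct rte_flow_item_ipv4 ipv4_mask = { .hdr = {
        .src_addr = 0xFFFFFFFF,
        .dst_addr = 0xFFFFFFFF,
    } };
    struct rte_flow_item_udp udp_spec = { .hdr = {
        .src_port = rte_cpu_to_be_16(80),
        .dst_port = rte_cpu_to_be_16(80),
    } };
    struct rte_flow_item_udp udp_mask = { .hdr = {
        .src_port = 0xFFFF,
        .dst_port = 0xFFFF,
    } };
    struct rte_flow_action_queue queue = { .index = 1 };
    const struct rte_flow_attr attr = { .ingress = 1 };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ipv4_spec, .mask = &ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_UDP,
          .spec = &udp_spec, .mask = &udp_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return ixgbe_parse_fdir_filter_normal(dev, &attr, pattern, actions,
                          rule, error);
}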

#define NVGRE_PROTOCOL 0x6558

/**
 * Parse the rule to see if it is a VxLAN or NVGRE flow director rule.
 * And get the flow director filter info BTW.
 * VxLAN PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be UDP.
 * The fourth not void item must be VxLAN.
 * The next not void item must be END.
 * NVGRE PATTERN:
 * The first not void item must be ETH.
 * The second not void item must be IPV4/IPV6.
 * The third not void item must be NVGRE.
 * The next not void item must be END.
 * ACTION:
 * The first not void action should be QUEUE or DROP.
 * The second not void optional action should be MARK,
 * mark_id is a uint32_t number.
 * The next not void action should be END.
 * VxLAN pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * UDP          NULL                    NULL
 * VxLAN        vni {0x00, 0x32, 0x54}  {0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * NVGRE pattern example:
 * ITEM         Spec                    Mask
 * ETH          NULL                    NULL
 * IPV4/IPV6    NULL                    NULL
 * NVGRE        protocol        0x6558  0xFFFF
 *              tni {0x00, 0x32, 0x54}  {0xFF, 0xFF, 0xFF}
 * MAC VLAN     tci     0x2016          0xEFFF
 * END
 * other members in mask and spec should be set to 0x00.
 * item->last should be NULL.
 */
static int
ixgbe_parse_fdir_filter_tunnel(const struct rte_flow_attr *attr,
			       const struct rte_flow_item pattern[],
			       const struct rte_flow_action actions[],
			       struct ixgbe_fdir_rule *rule,
			       struct rte_flow_error *error)
{
	const struct rte_flow_item *item;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint32_t j;
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM_NUM,
			NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ACTION_NUM,
			NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ATTR,
			NULL, "NULL attribute.");
		return -rte_errno;
	}
	/**
	 * Some fields may not be provided. Set spec to 0 and mask to default
	 * value, so we need not do anything for the not provided fields later.
	 */
	memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
	memset(&rule->mask, 0xFF, sizeof(struct ixgbe_hw_fdir_mask));
	rule->mask.vlan_tci_mask = 0;
	/**
	 * The first not void item should be
	 * MAC or IPv4 or IPv6 or UDP or VxLAN or NVGRE.
	 */
	item = next_no_void_pattern(pattern, NULL);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
	    item->type != RTE_FLOW_ITEM_TYPE_IPV6 &&
	    item->type != RTE_FLOW_ITEM_TYPE_UDP &&
	    item->type != RTE_FLOW_ITEM_TYPE_VXLAN &&
	    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	rule->mode = RTE_FDIR_MODE_PERFECT_TUNNEL;
	/* Skip MAC. */
	if (item->type == RTE_FLOW_ITEM_TYPE_ETH) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is IPv4 or IPv6. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_IPV4 &&
		    item->type != RTE_FLOW_ITEM_TYPE_IPV6) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip IP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	    item->type == RTE_FLOW_ITEM_TYPE_IPV6) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is UDP or NVGRE. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_UDP &&
		    item->type != RTE_FLOW_ITEM_TYPE_NVGRE) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Skip UDP. */
	if (item->type == RTE_FLOW_ITEM_TYPE_UDP) {
		/* Only used to describe the protocol stack. */
		if (item->spec || item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}

		/* Check if the next not void item is VxLAN. */
		item = next_no_void_pattern(pattern, item);
		if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	/* Get the VxLAN info */
	if (item->type == RTE_FLOW_ITEM_TYPE_VXLAN) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_VXLAN;

		/* Only care about VNI, others should be masked. */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		vxlan_mask =
			(const struct rte_flow_item_vxlan *)item->mask;
		if (vxlan_mask->flags) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* VNI must be totally masked or not. */
		if ((vxlan_mask->vni[0] || vxlan_mask->vni[1] ||
		     vxlan_mask->vni[2]) &&
		    ((vxlan_mask->vni[0] != 0xFF) ||
		     (vxlan_mask->vni[1] != 0xFF) ||
		     (vxlan_mask->vni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		rte_memcpy(&rule->mask.tunnel_id_mask, vxlan_mask->vni,
			RTE_DIM(vxlan_mask->vni));

		if (item->spec) {
			rule->b_spec = TRUE;
			vxlan_spec = (const struct rte_flow_item_vxlan *)
					item->spec;
			rte_memcpy(((uint8_t *)
				&rule->ixgbe_fdir.formatted.tni_vni + 1),
				vxlan_spec->vni, RTE_DIM(vxlan_spec->vni));
			rule->ixgbe_fdir.formatted.tni_vni = rte_be_to_cpu_32(
				rule->ixgbe_fdir.formatted.tni_vni);
		}
	}
	/* Get the NVGRE info */
	if (item->type == RTE_FLOW_ITEM_TYPE_NVGRE) {
		rule->ixgbe_fdir.formatted.tunnel_type =
			RTE_FDIR_TUNNEL_TYPE_NVGRE;

		/**
		 * Only care about flags0, flags1, protocol and TNI,
		 * others should be masked.
		 */
		if (!item->mask) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* Not supported last point for range */
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				item, "Not supported last point for range");
			return -rte_errno;
		}
		rule->b_mask = TRUE;

		/* Tunnel type is always meaningful. */
		rule->mask.tunnel_type_mask = 1;

		nvgre_mask =
			(const struct rte_flow_item_nvgre *)item->mask;
		if (nvgre_mask->flow_id) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		if (nvgre_mask->c_k_s_rsvd0_ver !=
		    rte_cpu_to_be_16(0x3000) ||
		    nvgre_mask->protocol != 0xFFFF) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* TNI must be totally masked or not. */
		if ((nvgre_mask->tni[0] || nvgre_mask->tni[1] ||
		     nvgre_mask->tni[2]) &&
		    ((nvgre_mask->tni[0] != 0xFF) ||
		     (nvgre_mask->tni[1] != 0xFF) ||
		     (nvgre_mask->tni[2] != 0xFF))) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
		/* tni is a 24-bit field */
		rte_memcpy(&rule->mask.tunnel_id_mask, nvgre_mask->tni,
			RTE_DIM(nvgre_mask->tni));
		rule->mask.tunnel_id_mask <<= 8;

		if (item->spec) {
			rule->b_spec = TRUE;
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			if (nvgre_spec->c_k_s_rsvd0_ver !=
			    rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
			    rte_cpu_to_be_16(NVGRE_PROTOCOL)) {
				memset(rule, 0,
					sizeof(struct ixgbe_fdir_rule));
				rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_ITEM,
					item, "Not supported by fdir filter");
				return -rte_errno;
			}
			/* tni is a 24-bit field */
			rte_memcpy(&rule->ixgbe_fdir.formatted.tni_vni,
				nvgre_spec->tni, RTE_DIM(nvgre_spec->tni));
			rule->ixgbe_fdir.formatted.tni_vni <<= 8;
		}
	}
	/* check if the next not void item is MAC */
	item = next_no_void_pattern(pattern, item);
	if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/**
	 * Only VLAN and dst MAC address are supported,
	 * others should be masked.
	 */

	if (!item->mask) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}
	rule->b_mask = TRUE;
	eth_mask = (const struct rte_flow_item_eth *)item->mask;

	/* Ether type should be masked. */
	if (eth_mask->type) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}

	/* src MAC address should be masked. */
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		if (eth_mask->src.addr_bytes[j]) {
			memset(rule, 0,
			       sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}
	rule->mask.mac_addr_byte_mask = 0;
	for (j = 0; j < ETHER_ADDR_LEN; j++) {
		/* It's a per byte mask. */
		if (eth_mask->dst.addr_bytes[j] == 0xFF) {
			rule->mask.mac_addr_byte_mask |= 0x1 << j;
		} else if (eth_mask->dst.addr_bytes[j]) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/* When no VLAN is given, consider it a full mask. */
	rule->mask.vlan_tci_mask = rte_cpu_to_be_16(0xEFFF);

	if (item->spec) {
		rule->b_spec = TRUE;
		eth_spec = (const struct rte_flow_item_eth *)item->spec;

		/* Get the dst MAC. */
		for (j = 0; j < ETHER_ADDR_LEN; j++) {
			rule->ixgbe_fdir.formatted.inner_mac[j] =
				eth_spec->dst.addr_bytes[j];
		}
	}
	/**
	 * Check if the next not void item is VLAN or IPv4.
	 * IPv6 is not supported.
	 */
	item = next_no_void_pattern(pattern, item);
	if ((item->type != RTE_FLOW_ITEM_TYPE_VLAN) &&
	    (item->type != RTE_FLOW_ITEM_TYPE_IPV4)) {
		memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_ITEM,
			item, "Not supported by fdir filter");
		return -rte_errno;
	}
	/* Not supported last point for range */
	if (item->last) {
		rte_flow_error_set(error, EINVAL,
			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			item, "Not supported last point for range");
		return -rte_errno;
	}

	if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		if (!(item->spec && item->mask)) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}

		vlan_spec = (const struct rte_flow_item_vlan *)item->spec;
		vlan_mask = (const struct rte_flow_item_vlan *)item->mask;

		rule->ixgbe_fdir.formatted.vlan_id = vlan_spec->tci;

		rule->mask.vlan_tci_mask = vlan_mask->tci;
		rule->mask.vlan_tci_mask &= rte_cpu_to_be_16(0xEFFF);
		/* More than one VLAN tag is not supported. */

		/* check if the next not void item is END */
		item = next_no_void_pattern(pattern, item);

		if (item->type != RTE_FLOW_ITEM_TYPE_END) {
			memset(rule, 0, sizeof(struct ixgbe_fdir_rule));
			rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item, "Not supported by fdir filter");
			return -rte_errno;
		}
	}

	/**
	 * If the tci mask is 0, the VLAN is a don't-care.
	 * Do nothing.
	 */

	return ixgbe_parse_fdir_act_attr(attr, actions, rule, error);
}
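/*
 * Illustration only (hypothetical application-side code, not part of the
 * driver): items for the VxLAN pattern accepted above. The outer
 * ETH/IPV4/UDP items carry no spec/mask and only describe the protocol
 * stack; VXLAN matches a fully masked VNI; the inner ETH matches the dst
 * MAC per byte. The addresses and VNI values are made up for the example.
 *
 *	struct rte_flow_item_vxlan vxlan_spec = {
 *		.vni = { 0x00, 0x32, 0x54 },
 *	};
 *	struct rte_flow_item_vxlan vxlan_mask = {
 *		.vni = { 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item_eth inner_eth_spec = {
 *		.dst.addr_bytes = { 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0x01 },
 *	};
 *	struct rte_flow_item_eth inner_eth_mask = {
 *		.dst.addr_bytes = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		  .spec = &vxlan_spec, .mask = &vxlan_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .spec = &inner_eth_spec, .mask = &inner_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */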
static int
ixgbe_parse_fdir_filter(struct rte_eth_dev *dev,
			const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct ixgbe_fdir_rule *rule,
			struct rte_flow_error *error)
{
	int ret;
	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode;

	if (hw->mac.type != ixgbe_mac_82599EB &&
	    hw->mac.type != ixgbe_mac_X540 &&
	    hw->mac.type != ixgbe_mac_X550 &&
	    hw->mac.type != ixgbe_mac_X550EM_x &&
	    hw->mac.type != ixgbe_mac_X550EM_a)
		return -ENOTSUP;

	ret = ixgbe_parse_fdir_filter_normal(dev, attr, pattern,
					     actions, rule, error);
	if (!ret)
		goto step_next;

	ret = ixgbe_parse_fdir_filter_tunnel(attr, pattern,
					     actions, rule, error);
	if (ret)
		return ret;

step_next:

	if (hw->mac.type == ixgbe_mac_82599EB &&
	    rule->fdirflags == IXGBE_FDIRCMD_DROP &&
	    (rule->ixgbe_fdir.formatted.src_port != 0 ||
	     rule->ixgbe_fdir.formatted.dst_port != 0))
		return -ENOTSUP;

	if (fdir_mode == RTE_FDIR_MODE_NONE ||
	    fdir_mode != rule->mode)
		return -ENOTSUP;

	if (rule->queue >= dev->data->nb_rx_queues)
		return -ENOTSUP;

	return ret;
}
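/*
 * Illustration only (hypothetical application-side code): because the
 * parser rejects rules whose mode differs from the port configuration,
 * the port must be configured with a matching flow director mode before
 * any fdir rte_flow rule can be created, e.g.:
 *
 *	struct rte_eth_conf port_conf = {
 *		.fdir_conf = {
 *			.mode = RTE_FDIR_MODE_PERFECT,
 *			.pballoc = RTE_FDIR_PBALLOC_64K,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &port_conf);
 *
 * Tunnel rules parsed by ixgbe_parse_fdir_filter_tunnel() likewise need
 * RTE_FDIR_MODE_PERFECT_TUNNEL.
 */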
void
ixgbe_filterlist_init(void)
{
	TAILQ_INIT(&filter_ntuple_list);
	TAILQ_INIT(&filter_ethertype_list);
	TAILQ_INIT(&filter_syn_list);
	TAILQ_INIT(&filter_fdir_list);
	TAILQ_INIT(&filter_l2_tunnel_list);
	TAILQ_INIT(&ixgbe_flow_list);
}
void
ixgbe_filterlist_flush(void)
{
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;

	while ((ntuple_filter_ptr = TAILQ_FIRST(&filter_ntuple_list))) {
		TAILQ_REMOVE(&filter_ntuple_list,
			     ntuple_filter_ptr,
			     entries);
		rte_free(ntuple_filter_ptr);
	}

	while ((ethertype_filter_ptr = TAILQ_FIRST(&filter_ethertype_list))) {
		TAILQ_REMOVE(&filter_ethertype_list,
			     ethertype_filter_ptr,
			     entries);
		rte_free(ethertype_filter_ptr);
	}

	while ((syn_filter_ptr = TAILQ_FIRST(&filter_syn_list))) {
		TAILQ_REMOVE(&filter_syn_list,
			     syn_filter_ptr,
			     entries);
		rte_free(syn_filter_ptr);
	}

	while ((l2_tn_filter_ptr = TAILQ_FIRST(&filter_l2_tunnel_list))) {
		TAILQ_REMOVE(&filter_l2_tunnel_list,
			     l2_tn_filter_ptr,
			     entries);
		rte_free(l2_tn_filter_ptr);
	}

	while ((fdir_rule_ptr = TAILQ_FIRST(&filter_fdir_list))) {
		TAILQ_REMOVE(&filter_fdir_list,
			     fdir_rule_ptr,
			     entries);
		rte_free(fdir_rule_ptr);
	}

	while ((ixgbe_flow_mem_ptr = TAILQ_FIRST(&ixgbe_flow_list))) {
		TAILQ_REMOVE(&ixgbe_flow_list,
			     ixgbe_flow_mem_ptr,
			     entries);
		rte_free(ixgbe_flow_mem_ptr->flow);
		rte_free(ixgbe_flow_mem_ptr);
	}
}
/**
 * Create or destroy a flow rule.
 * Theoretically one rule can match more than one filter.
 * We will let it use the first filter it hits,
 * so the order of the parsers below matters.
 */
static struct rte_flow *
ixgbe_flow_create(struct rte_eth_dev *dev,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);
	struct rte_flow *flow = NULL;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	uint8_t first_mask = FALSE;

	flow = rte_zmalloc("ixgbe_rte_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		return (struct rte_flow *)flow;
	}
	ixgbe_flow_mem_ptr = rte_zmalloc("ixgbe_flow_mem",
			sizeof(struct ixgbe_flow_mem), 0);
	if (!ixgbe_flow_mem_ptr) {
		PMD_DRV_LOG(ERR, "failed to allocate memory");
		rte_free(flow);
		return NULL;
	}
	ixgbe_flow_mem_ptr->flow = flow;
	TAILQ_INSERT_TAIL(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
			actions, &ntuple_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, TRUE);
		if (!ret) {
			ntuple_filter_ptr = rte_zmalloc("ixgbe_ntuple_filter",
				sizeof(struct ixgbe_ntuple_filter_ele), 0);
			if (!ntuple_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ntuple_filter_ptr->filter_info,
				&ntuple_filter,
				sizeof(struct rte_eth_ntuple_filter));
			TAILQ_INSERT_TAIL(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			flow->rule = ntuple_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_NTUPLE;
			return flow;
		}
		goto out;
	}

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret) {
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, TRUE);
		if (!ret) {
			ethertype_filter_ptr = rte_zmalloc(
				"ixgbe_ethertype_filter",
				sizeof(struct ixgbe_ethertype_filter_ele), 0);
			if (!ethertype_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&ethertype_filter_ptr->filter_info,
				&ethertype_filter,
				sizeof(struct rte_eth_ethertype_filter));
			TAILQ_INSERT_TAIL(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			flow->rule = ethertype_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_ETHERTYPE;
			return flow;
		}
		goto out;
	}

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret) {
		ret = ixgbe_syn_filter_set(dev, &syn_filter, TRUE);
		if (!ret) {
			syn_filter_ptr = rte_zmalloc("ixgbe_syn_filter",
				sizeof(struct ixgbe_eth_syn_filter_ele), 0);
			if (!syn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&syn_filter_ptr->filter_info,
				&syn_filter,
				sizeof(struct rte_eth_syn_filter));
			TAILQ_INSERT_TAIL(&filter_syn_list,
				syn_filter_ptr,
				entries);
			flow->rule = syn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_SYN;
			return flow;
		}
		goto out;
	}

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret) {
		/* A mask cannot be deleted. */
		if (fdir_rule.b_mask) {
			if (!fdir_info->mask_added) {
				/* It's the first time the mask is set. */
				rte_memcpy(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				fdir_info->flex_bytes_offset =
					fdir_rule.flex_bytes_offset;

				if (fdir_rule.mask.flex_bytes_mask)
					ixgbe_fdir_set_flexbytes_offset(dev,
						fdir_rule.flex_bytes_offset);

				ret = ixgbe_fdir_set_input_mask(dev);
				if (ret)
					goto out;

				fdir_info->mask_added = TRUE;
				first_mask = TRUE;
			} else {
				/**
				 * Only one global mask is supported,
				 * all the masks should be the same.
				 */
				ret = memcmp(&fdir_info->mask,
					&fdir_rule.mask,
					sizeof(struct ixgbe_hw_fdir_mask));
				if (ret)
					goto out;

				if (fdir_info->flex_bytes_offset !=
						fdir_rule.flex_bytes_offset)
					goto out;
			}
		}

		if (fdir_rule.b_spec) {
			ret = ixgbe_fdir_filter_program(dev, &fdir_rule,
					FALSE, FALSE);
			if (!ret) {
				fdir_rule_ptr = rte_zmalloc("ixgbe_fdir_filter",
					sizeof(struct ixgbe_fdir_rule_ele), 0);
				if (!fdir_rule_ptr) {
					PMD_DRV_LOG(ERR,
						"failed to allocate memory");
					goto out;
				}
				rte_memcpy(&fdir_rule_ptr->filter_info,
					&fdir_rule,
					sizeof(struct ixgbe_fdir_rule));
				TAILQ_INSERT_TAIL(&filter_fdir_list,
					fdir_rule_ptr, entries);
				flow->rule = fdir_rule_ptr;
				flow->filter_type = RTE_ETH_FILTER_FDIR;

				return flow;
			}

			if (ret) {
				/**
				 * clean the mask_added flag if we fail
				 * to program the rule
				 */
				if (first_mask)
					fdir_info->mask_added = FALSE;
				goto out;
			}
		}

		goto out;
	}

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
					actions, &l2_tn_filter, error);
	if (!ret) {
		ret = ixgbe_dev_l2_tunnel_filter_add(dev, &l2_tn_filter, FALSE);
		if (!ret) {
			l2_tn_filter_ptr = rte_zmalloc("ixgbe_l2_tn_filter",
				sizeof(struct ixgbe_eth_l2_tunnel_conf_ele), 0);
			if (!l2_tn_filter_ptr) {
				PMD_DRV_LOG(ERR, "failed to allocate memory");
				goto out;
			}
			rte_memcpy(&l2_tn_filter_ptr->filter_info,
				&l2_tn_filter,
				sizeof(struct rte_eth_l2_tunnel_conf));
			TAILQ_INSERT_TAIL(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			flow->rule = l2_tn_filter_ptr;
			flow->filter_type = RTE_ETH_FILTER_L2_TUNNEL;
			return flow;
		}
	}

out:
	TAILQ_REMOVE(&ixgbe_flow_list,
		ixgbe_flow_mem_ptr, entries);
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(ixgbe_flow_mem_ptr);
	rte_free(flow);
	return NULL;
}
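/*
 * Illustration only (hypothetical application-side code): creating a rule
 * through the generic rte_flow API; this PMD then tries each parser above
 * in order until one accepts the rule.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 *	if (!f)
 *		printf("flow create failed: %s\n",
 *		       err.message ? err.message : "unknown");
 */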
/**
 * Check if the flow rule is supported by ixgbe.
 * It only checks the format. It does not guarantee that the rule can be
 * programmed into the HW, as there may not be enough room for it.
 */
static int
ixgbe_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	int ret;

	memset(&ntuple_filter, 0, sizeof(struct rte_eth_ntuple_filter));
	ret = ixgbe_parse_ntuple_filter(dev, attr, pattern,
				actions, &ntuple_filter, error);
	if (!ret)
		return 0;

	memset(&ethertype_filter, 0, sizeof(struct rte_eth_ethertype_filter));
	ret = ixgbe_parse_ethertype_filter(dev, attr, pattern,
				actions, &ethertype_filter, error);
	if (!ret)
		return 0;

	memset(&syn_filter, 0, sizeof(struct rte_eth_syn_filter));
	ret = ixgbe_parse_syn_filter(dev, attr, pattern,
				actions, &syn_filter, error);
	if (!ret)
		return 0;

	memset(&fdir_rule, 0, sizeof(struct ixgbe_fdir_rule));
	ret = ixgbe_parse_fdir_filter(dev, attr, pattern,
				actions, &fdir_rule, error);
	if (!ret)
		return 0;

	memset(&l2_tn_filter, 0, sizeof(struct rte_eth_l2_tunnel_conf));
	ret = ixgbe_parse_l2_tn_filter(dev, attr, pattern,
				actions, &l2_tn_filter, error);

	return ret;
}
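/*
 * Illustration only (hypothetical application-side code): since the
 * validate callback runs exactly the parsers used by create, a rule can
 * be checked before any hardware or list state is touched:
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */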
/* Destroy a flow rule on ixgbe. */
static int
ixgbe_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error)
{
	int ret;
	struct rte_flow *pmd_flow = flow;
	enum rte_filter_type filter_type = pmd_flow->filter_type;
	struct rte_eth_ntuple_filter ntuple_filter;
	struct rte_eth_ethertype_filter ethertype_filter;
	struct rte_eth_syn_filter syn_filter;
	struct ixgbe_fdir_rule fdir_rule;
	struct rte_eth_l2_tunnel_conf l2_tn_filter;
	struct ixgbe_ntuple_filter_ele *ntuple_filter_ptr;
	struct ixgbe_ethertype_filter_ele *ethertype_filter_ptr;
	struct ixgbe_eth_syn_filter_ele *syn_filter_ptr;
	struct ixgbe_eth_l2_tunnel_conf_ele *l2_tn_filter_ptr;
	struct ixgbe_fdir_rule_ele *fdir_rule_ptr;
	struct ixgbe_flow_mem *ixgbe_flow_mem_ptr;
	struct ixgbe_hw_fdir_info *fdir_info =
		IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private);

	switch (filter_type) {
	case RTE_ETH_FILTER_NTUPLE:
		ntuple_filter_ptr = (struct ixgbe_ntuple_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ntuple_filter,
			&ntuple_filter_ptr->filter_info,
			sizeof(struct rte_eth_ntuple_filter));
		ret = ixgbe_add_del_ntuple_filter(dev, &ntuple_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ntuple_list,
				ntuple_filter_ptr, entries);
			rte_free(ntuple_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_ETHERTYPE:
		ethertype_filter_ptr = (struct ixgbe_ethertype_filter_ele *)
					pmd_flow->rule;
		rte_memcpy(&ethertype_filter,
			&ethertype_filter_ptr->filter_info,
			sizeof(struct rte_eth_ethertype_filter));
		ret = ixgbe_add_del_ethertype_filter(dev,
				&ethertype_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_ethertype_list,
				ethertype_filter_ptr, entries);
			rte_free(ethertype_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_SYN:
		syn_filter_ptr = (struct ixgbe_eth_syn_filter_ele *)
				pmd_flow->rule;
		rte_memcpy(&syn_filter,
			&syn_filter_ptr->filter_info,
			sizeof(struct rte_eth_syn_filter));
		ret = ixgbe_syn_filter_set(dev, &syn_filter, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_syn_list,
				syn_filter_ptr, entries);
			rte_free(syn_filter_ptr);
		}
		break;
	case RTE_ETH_FILTER_FDIR:
		fdir_rule_ptr = (struct ixgbe_fdir_rule_ele *)pmd_flow->rule;
		rte_memcpy(&fdir_rule,
			&fdir_rule_ptr->filter_info,
			sizeof(struct ixgbe_fdir_rule));
		ret = ixgbe_fdir_filter_program(dev, &fdir_rule, TRUE, FALSE);
		if (!ret) {
			TAILQ_REMOVE(&filter_fdir_list,
				fdir_rule_ptr, entries);
			rte_free(fdir_rule_ptr);
			if (TAILQ_EMPTY(&filter_fdir_list))
				fdir_info->mask_added = false;
		}
		break;
	case RTE_ETH_FILTER_L2_TUNNEL:
		l2_tn_filter_ptr = (struct ixgbe_eth_l2_tunnel_conf_ele *)
				pmd_flow->rule;
		rte_memcpy(&l2_tn_filter, &l2_tn_filter_ptr->filter_info,
			sizeof(struct rte_eth_l2_tunnel_conf));
		ret = ixgbe_dev_l2_tunnel_filter_del(dev, &l2_tn_filter);
		if (!ret) {
			TAILQ_REMOVE(&filter_l2_tunnel_list,
				l2_tn_filter_ptr, entries);
			rte_free(l2_tn_filter_ptr);
		}
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (ret) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_HANDLE,
				NULL, "Failed to destroy flow");
		return ret;
	}

	TAILQ_FOREACH(ixgbe_flow_mem_ptr, &ixgbe_flow_list, entries) {
		if (ixgbe_flow_mem_ptr->flow == pmd_flow) {
			TAILQ_REMOVE(&ixgbe_flow_list,
				ixgbe_flow_mem_ptr, entries);
			rte_free(ixgbe_flow_mem_ptr);
			/* Stop iterating once the node is freed. */
			break;
		}
	}
	rte_free(flow);

	return ret;
}
/* Destroy all flow rules associated with a port on ixgbe. */
static int
ixgbe_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	int ret = 0;

	ixgbe_clear_all_ntuple_filter(dev);
	ixgbe_clear_all_ethertype_filter(dev);
	ixgbe_clear_syn_filter(dev);

	ret = ixgbe_clear_all_fdir_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ret = ixgbe_clear_all_l2_tn_filter(dev);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Failed to flush rule");
		return ret;
	}

	ixgbe_filterlist_flush();

	return 0;
}
const struct rte_flow_ops ixgbe_flow_ops = {
	.validate = ixgbe_flow_validate,
	.create = ixgbe_flow_create,
	.destroy = ixgbe_flow_destroy,
	.flush = ixgbe_flow_flush,
};
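/*
 * Note: applications never call these ops directly; the ethdev layer
 * fetches them through the generic filter control path. A sketch of the
 * hookup, assuming the usual dispatch in ixgbe_dev_filter_ctrl():
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &ixgbe_flow_ops;
 *		break;
 */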