/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
/* Shift applied to extract the IPv6 Traffic Class from the vtc_flow word. */
#define I40E_IPV4_TC_SHIFT	4
/* Mask covering the 8-bit Traffic Class field after the shift above. */
#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
/* IPv6 next-header value of the Fragment extension header (RFC 2460). */
#define I40E_IPV6_FRAG_HEADER	44
59 static int i40e_flow_validate(struct rte_eth_dev *dev,
60 const struct rte_flow_attr *attr,
61 const struct rte_flow_item pattern[],
62 const struct rte_flow_action actions[],
63 struct rte_flow_error *error);
65 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
66 const struct rte_flow_item *pattern,
67 struct rte_flow_error *error,
68 struct rte_eth_ethertype_filter *filter);
69 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
70 const struct rte_flow_action *actions,
71 struct rte_flow_error *error,
72 struct rte_eth_ethertype_filter *filter);
73 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
74 const struct rte_flow_item *pattern,
75 struct rte_flow_error *error,
76 struct rte_eth_fdir_filter *filter);
77 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
78 const struct rte_flow_action *actions,
79 struct rte_flow_error *error,
80 struct rte_eth_fdir_filter *filter);
81 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
82 struct rte_flow_error *error);
83 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
84 const struct rte_flow_attr *attr,
85 const struct rte_flow_item pattern[],
86 const struct rte_flow_action actions[],
87 struct rte_flow_error *error,
88 union i40e_filter_t *filter);
89 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
90 const struct rte_flow_attr *attr,
91 const struct rte_flow_item pattern[],
92 const struct rte_flow_action actions[],
93 struct rte_flow_error *error,
94 union i40e_filter_t *filter);
96 const struct rte_flow_ops i40e_flow_ops = {
97 .validate = i40e_flow_validate,
100 union i40e_filter_t cons_filter;
102 /* Pattern matched ethertype filter */
103 static enum rte_flow_item_type pattern_ethertype[] = {
104 RTE_FLOW_ITEM_TYPE_ETH,
105 RTE_FLOW_ITEM_TYPE_END,
108 /* Pattern matched flow director filter */
109 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
110 RTE_FLOW_ITEM_TYPE_IPV4,
111 RTE_FLOW_ITEM_TYPE_END,
114 static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
115 RTE_FLOW_ITEM_TYPE_ETH,
116 RTE_FLOW_ITEM_TYPE_IPV4,
117 RTE_FLOW_ITEM_TYPE_END,
120 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
121 RTE_FLOW_ITEM_TYPE_IPV4,
122 RTE_FLOW_ITEM_TYPE_UDP,
123 RTE_FLOW_ITEM_TYPE_END,
126 static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
127 RTE_FLOW_ITEM_TYPE_ETH,
128 RTE_FLOW_ITEM_TYPE_IPV4,
129 RTE_FLOW_ITEM_TYPE_UDP,
130 RTE_FLOW_ITEM_TYPE_END,
133 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
134 RTE_FLOW_ITEM_TYPE_IPV4,
135 RTE_FLOW_ITEM_TYPE_TCP,
136 RTE_FLOW_ITEM_TYPE_END,
139 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
140 RTE_FLOW_ITEM_TYPE_ETH,
141 RTE_FLOW_ITEM_TYPE_IPV4,
142 RTE_FLOW_ITEM_TYPE_TCP,
143 RTE_FLOW_ITEM_TYPE_END,
146 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
147 RTE_FLOW_ITEM_TYPE_IPV4,
148 RTE_FLOW_ITEM_TYPE_SCTP,
149 RTE_FLOW_ITEM_TYPE_END,
152 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
153 RTE_FLOW_ITEM_TYPE_ETH,
154 RTE_FLOW_ITEM_TYPE_IPV4,
155 RTE_FLOW_ITEM_TYPE_SCTP,
156 RTE_FLOW_ITEM_TYPE_END,
159 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
160 RTE_FLOW_ITEM_TYPE_IPV6,
161 RTE_FLOW_ITEM_TYPE_END,
164 static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
165 RTE_FLOW_ITEM_TYPE_ETH,
166 RTE_FLOW_ITEM_TYPE_IPV6,
167 RTE_FLOW_ITEM_TYPE_END,
170 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
171 RTE_FLOW_ITEM_TYPE_IPV6,
172 RTE_FLOW_ITEM_TYPE_UDP,
173 RTE_FLOW_ITEM_TYPE_END,
176 static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
177 RTE_FLOW_ITEM_TYPE_ETH,
178 RTE_FLOW_ITEM_TYPE_IPV6,
179 RTE_FLOW_ITEM_TYPE_UDP,
180 RTE_FLOW_ITEM_TYPE_END,
183 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
184 RTE_FLOW_ITEM_TYPE_IPV6,
185 RTE_FLOW_ITEM_TYPE_TCP,
186 RTE_FLOW_ITEM_TYPE_END,
189 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
190 RTE_FLOW_ITEM_TYPE_ETH,
191 RTE_FLOW_ITEM_TYPE_IPV6,
192 RTE_FLOW_ITEM_TYPE_TCP,
193 RTE_FLOW_ITEM_TYPE_END,
196 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
197 RTE_FLOW_ITEM_TYPE_IPV6,
198 RTE_FLOW_ITEM_TYPE_SCTP,
199 RTE_FLOW_ITEM_TYPE_END,
202 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
203 RTE_FLOW_ITEM_TYPE_ETH,
204 RTE_FLOW_ITEM_TYPE_IPV6,
205 RTE_FLOW_ITEM_TYPE_SCTP,
206 RTE_FLOW_ITEM_TYPE_END,
209 static struct i40e_valid_pattern i40e_supported_patterns[] = {
211 { pattern_ethertype, i40e_flow_parse_ethertype_filter },
213 { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
214 { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
215 { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
216 { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
217 { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
218 { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
219 { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
220 { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
221 { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
222 { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
223 { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
224 { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
225 { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
226 { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
227 { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
228 { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
/* Advance 'act' to actions[index], then skip any VOID actions.
 * Both 'act' and 'index' are updated in place by the expansion.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
	do {								\
		act = actions + index;					\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
			index++;					\
			act = actions + index;				\
		}							\
	} while (0)
240 /* Find the first VOID or non-VOID item pointer */
241 static const struct rte_flow_item *
242 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
246 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
248 is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
250 is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
258 /* Skip all VOID items of the pattern */
260 i40e_pattern_skip_void_item(struct rte_flow_item *items,
261 const struct rte_flow_item *pattern)
263 uint32_t cpy_count = 0;
264 const struct rte_flow_item *pb = pattern, *pe = pattern;
267 /* Find a non-void item first */
268 pb = i40e_find_first_item(pb, false);
269 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
274 /* Find a void item */
275 pe = i40e_find_first_item(pb + 1, true);
278 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
282 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
289 /* Copy the END item. */
290 rte_memcpy(items, pe, sizeof(struct rte_flow_item));
293 /* Check if the pattern matches a supported item type array */
295 i40e_match_pattern(enum rte_flow_item_type *item_array,
296 struct rte_flow_item *pattern)
298 struct rte_flow_item *item = pattern;
300 while ((*item_array == item->type) &&
301 (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
306 return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
307 item->type == RTE_FLOW_ITEM_TYPE_END);
310 /* Find if there's parse filter function matched */
311 static parse_filter_t
312 i40e_find_parse_filter_func(struct rte_flow_item *pattern)
314 parse_filter_t parse_filter = NULL;
317 for (; i < RTE_DIM(i40e_supported_patterns); i++) {
318 if (i40e_match_pattern(i40e_supported_patterns[i].items,
320 parse_filter = i40e_supported_patterns[i].parse_filter;
328 /* Parse attributes */
330 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
331 struct rte_flow_error *error)
333 /* Must be input direction */
334 if (!attr->ingress) {
335 rte_flow_error_set(error, EINVAL,
336 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
337 attr, "Only support ingress.");
343 rte_flow_error_set(error, EINVAL,
344 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
345 attr, "Not support egress.");
350 if (attr->priority) {
351 rte_flow_error_set(error, EINVAL,
352 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
353 attr, "Not support priority.");
359 rte_flow_error_set(error, EINVAL,
360 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
361 attr, "Not support group.");
369 i40e_get_outer_vlan(struct rte_eth_dev *dev)
371 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
372 int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
382 i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
385 tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
390 /* 1. Last in item should be NULL as range is not supported.
391 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
392 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
393 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
395 * 5. Ether_type mask should be 0xFFFF.
398 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
399 const struct rte_flow_item *pattern,
400 struct rte_flow_error *error,
401 struct rte_eth_ethertype_filter *filter)
403 const struct rte_flow_item *item = pattern;
404 const struct rte_flow_item_eth *eth_spec;
405 const struct rte_flow_item_eth *eth_mask;
406 enum rte_flow_item_type item_type;
409 outer_tpid = i40e_get_outer_vlan(dev);
411 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
413 rte_flow_error_set(error, EINVAL,
414 RTE_FLOW_ERROR_TYPE_ITEM,
416 "Not support range");
419 item_type = item->type;
421 case RTE_FLOW_ITEM_TYPE_ETH:
422 eth_spec = (const struct rte_flow_item_eth *)item->spec;
423 eth_mask = (const struct rte_flow_item_eth *)item->mask;
424 /* Get the MAC info. */
425 if (!eth_spec || !eth_mask) {
426 rte_flow_error_set(error, EINVAL,
427 RTE_FLOW_ERROR_TYPE_ITEM,
429 "NULL ETH spec/mask");
433 /* Mask bits of source MAC address must be full of 0.
434 * Mask bits of destination MAC address must be full
437 if (!is_zero_ether_addr(ð_mask->src) ||
438 (!is_zero_ether_addr(ð_mask->dst) &&
439 !is_broadcast_ether_addr(ð_mask->dst))) {
440 rte_flow_error_set(error, EINVAL,
441 RTE_FLOW_ERROR_TYPE_ITEM,
443 "Invalid MAC_addr mask");
447 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
448 rte_flow_error_set(error, EINVAL,
449 RTE_FLOW_ERROR_TYPE_ITEM,
451 "Invalid ethertype mask");
455 /* If mask bits of destination MAC address
456 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
458 if (is_broadcast_ether_addr(ð_mask->dst)) {
459 filter->mac_addr = eth_spec->dst;
460 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
462 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
464 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
466 if (filter->ether_type == ETHER_TYPE_IPv4 ||
467 filter->ether_type == ETHER_TYPE_IPv6 ||
468 filter->ether_type == outer_tpid) {
469 rte_flow_error_set(error, EINVAL,
470 RTE_FLOW_ERROR_TYPE_ITEM,
472 "Unsupported ether_type in"
473 " control packet filter.");
485 /* Ethertype action only supports QUEUE or DROP. */
487 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
488 const struct rte_flow_action *actions,
489 struct rte_flow_error *error,
490 struct rte_eth_ethertype_filter *filter)
492 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
493 const struct rte_flow_action *act;
494 const struct rte_flow_action_queue *act_q;
497 /* Check if the first non-void action is QUEUE or DROP. */
498 NEXT_ITEM_OF_ACTION(act, actions, index);
499 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
500 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
501 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
502 act, "Not supported action.");
506 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
507 act_q = (const struct rte_flow_action_queue *)act->conf;
508 filter->queue = act_q->index;
509 if (filter->queue >= pf->dev_data->nb_rx_queues) {
510 rte_flow_error_set(error, EINVAL,
511 RTE_FLOW_ERROR_TYPE_ACTION,
512 act, "Invalid queue ID for"
513 " ethertype_filter.");
517 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
520 /* Check if the next non-void item is END */
522 NEXT_ITEM_OF_ACTION(act, actions, index);
523 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
524 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
525 act, "Not supported action.");
533 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
534 const struct rte_flow_attr *attr,
535 const struct rte_flow_item pattern[],
536 const struct rte_flow_action actions[],
537 struct rte_flow_error *error,
538 union i40e_filter_t *filter)
540 struct rte_eth_ethertype_filter *ethertype_filter =
541 &filter->ethertype_filter;
544 ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
549 ret = i40e_flow_parse_ethertype_action(dev, actions, error,
554 ret = i40e_flow_parse_attr(attr, error);
561 /* 1. Last in item should be NULL as range is not supported.
562 * 2. Supported flow type and input set: refer to array
563 * default_inset_table in i40e_ethdev.c.
564 * 3. Mask of fields which need to be matched should be
566 * 4. Mask of fields which needn't to be matched should be
570 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
571 const struct rte_flow_item *pattern,
572 struct rte_flow_error *error,
573 struct rte_eth_fdir_filter *filter)
575 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
576 const struct rte_flow_item *item = pattern;
577 const struct rte_flow_item_eth *eth_spec, *eth_mask;
578 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
579 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
580 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
581 const struct rte_flow_item_udp *udp_spec, *udp_mask;
582 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
583 const struct rte_flow_item_vf *vf_spec;
584 uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
585 enum i40e_filter_pctype pctype;
586 uint64_t input_set = I40E_INSET_NONE;
587 uint16_t flag_offset;
588 enum rte_flow_item_type item_type;
589 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
592 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
594 rte_flow_error_set(error, EINVAL,
595 RTE_FLOW_ERROR_TYPE_ITEM,
597 "Not support range");
600 item_type = item->type;
602 case RTE_FLOW_ITEM_TYPE_ETH:
603 eth_spec = (const struct rte_flow_item_eth *)item->spec;
604 eth_mask = (const struct rte_flow_item_eth *)item->mask;
605 if (eth_spec || eth_mask) {
606 rte_flow_error_set(error, EINVAL,
607 RTE_FLOW_ERROR_TYPE_ITEM,
609 "Invalid ETH spec/mask");
613 case RTE_FLOW_ITEM_TYPE_IPV4:
614 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
616 (const struct rte_flow_item_ipv4 *)item->spec;
618 (const struct rte_flow_item_ipv4 *)item->mask;
619 if (!ipv4_spec || !ipv4_mask) {
620 rte_flow_error_set(error, EINVAL,
621 RTE_FLOW_ERROR_TYPE_ITEM,
623 "NULL IPv4 spec/mask");
627 /* Check IPv4 mask and update input set */
628 if (ipv4_mask->hdr.version_ihl ||
629 ipv4_mask->hdr.total_length ||
630 ipv4_mask->hdr.packet_id ||
631 ipv4_mask->hdr.fragment_offset ||
632 ipv4_mask->hdr.hdr_checksum) {
633 rte_flow_error_set(error, EINVAL,
634 RTE_FLOW_ERROR_TYPE_ITEM,
636 "Invalid IPv4 mask.");
640 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
641 input_set |= I40E_INSET_IPV4_SRC;
642 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
643 input_set |= I40E_INSET_IPV4_DST;
644 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
645 input_set |= I40E_INSET_IPV4_TOS;
646 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
647 input_set |= I40E_INSET_IPV4_TTL;
648 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
649 input_set |= I40E_INSET_IPV4_PROTO;
651 /* Get filter info */
652 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
653 /* Check if it is fragment. */
655 rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
656 if (flag_offset & IPV4_HDR_OFFSET_MASK ||
657 flag_offset & IPV4_HDR_MF_FLAG)
658 flow_type = RTE_ETH_FLOW_FRAG_IPV4;
660 /* Get the filter info */
661 filter->input.flow.ip4_flow.proto =
662 ipv4_spec->hdr.next_proto_id;
663 filter->input.flow.ip4_flow.tos =
664 ipv4_spec->hdr.type_of_service;
665 filter->input.flow.ip4_flow.ttl =
666 ipv4_spec->hdr.time_to_live;
667 filter->input.flow.ip4_flow.src_ip =
668 ipv4_spec->hdr.src_addr;
669 filter->input.flow.ip4_flow.dst_ip =
670 ipv4_spec->hdr.dst_addr;
673 case RTE_FLOW_ITEM_TYPE_IPV6:
674 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
676 (const struct rte_flow_item_ipv6 *)item->spec;
678 (const struct rte_flow_item_ipv6 *)item->mask;
679 if (!ipv6_spec || !ipv6_mask) {
680 rte_flow_error_set(error, EINVAL,
681 RTE_FLOW_ERROR_TYPE_ITEM,
683 "NULL IPv6 spec/mask");
687 /* Check IPv6 mask and update input set */
688 if (ipv6_mask->hdr.payload_len) {
689 rte_flow_error_set(error, EINVAL,
690 RTE_FLOW_ERROR_TYPE_ITEM,
692 "Invalid IPv6 mask");
696 /* SCR and DST address of IPv6 shouldn't be masked */
697 for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
698 if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
699 ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
700 rte_flow_error_set(error, EINVAL,
701 RTE_FLOW_ERROR_TYPE_ITEM,
703 "Invalid IPv6 mask");
708 input_set |= I40E_INSET_IPV6_SRC;
709 input_set |= I40E_INSET_IPV6_DST;
711 if ((ipv6_mask->hdr.vtc_flow &
712 rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
713 == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
714 input_set |= I40E_INSET_IPV6_TC;
715 if (ipv6_mask->hdr.proto == UINT8_MAX)
716 input_set |= I40E_INSET_IPV6_NEXT_HDR;
717 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
718 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
720 /* Get filter info */
721 filter->input.flow.ipv6_flow.tc =
722 (uint8_t)(ipv6_spec->hdr.vtc_flow <<
724 filter->input.flow.ipv6_flow.proto =
725 ipv6_spec->hdr.proto;
726 filter->input.flow.ipv6_flow.hop_limits =
727 ipv6_spec->hdr.hop_limits;
729 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
730 ipv6_spec->hdr.src_addr, 16);
731 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
732 ipv6_spec->hdr.dst_addr, 16);
734 /* Check if it is fragment. */
735 if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
736 flow_type = RTE_ETH_FLOW_FRAG_IPV6;
738 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
740 case RTE_FLOW_ITEM_TYPE_TCP:
741 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
742 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
743 if (!tcp_spec || !tcp_mask) {
744 rte_flow_error_set(error, EINVAL,
745 RTE_FLOW_ERROR_TYPE_ITEM,
747 "NULL TCP spec/mask");
751 /* Check TCP mask and update input set */
752 if (tcp_mask->hdr.sent_seq ||
753 tcp_mask->hdr.recv_ack ||
754 tcp_mask->hdr.data_off ||
755 tcp_mask->hdr.tcp_flags ||
756 tcp_mask->hdr.rx_win ||
757 tcp_mask->hdr.cksum ||
758 tcp_mask->hdr.tcp_urp) {
759 rte_flow_error_set(error, EINVAL,
760 RTE_FLOW_ERROR_TYPE_ITEM,
766 if (tcp_mask->hdr.src_port != UINT16_MAX ||
767 tcp_mask->hdr.dst_port != UINT16_MAX) {
768 rte_flow_error_set(error, EINVAL,
769 RTE_FLOW_ERROR_TYPE_ITEM,
775 input_set |= I40E_INSET_SRC_PORT;
776 input_set |= I40E_INSET_DST_PORT;
778 /* Get filter info */
779 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
780 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
781 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
782 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
784 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
785 filter->input.flow.tcp4_flow.src_port =
786 tcp_spec->hdr.src_port;
787 filter->input.flow.tcp4_flow.dst_port =
788 tcp_spec->hdr.dst_port;
789 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
790 filter->input.flow.tcp6_flow.src_port =
791 tcp_spec->hdr.src_port;
792 filter->input.flow.tcp6_flow.dst_port =
793 tcp_spec->hdr.dst_port;
796 case RTE_FLOW_ITEM_TYPE_UDP:
797 udp_spec = (const struct rte_flow_item_udp *)item->spec;
798 udp_mask = (const struct rte_flow_item_udp *)item->mask;
799 if (!udp_spec || !udp_mask) {
800 rte_flow_error_set(error, EINVAL,
801 RTE_FLOW_ERROR_TYPE_ITEM,
803 "NULL UDP spec/mask");
807 /* Check UDP mask and update input set*/
808 if (udp_mask->hdr.dgram_len ||
809 udp_mask->hdr.dgram_cksum) {
810 rte_flow_error_set(error, EINVAL,
811 RTE_FLOW_ERROR_TYPE_ITEM,
817 if (udp_mask->hdr.src_port != UINT16_MAX ||
818 udp_mask->hdr.dst_port != UINT16_MAX) {
819 rte_flow_error_set(error, EINVAL,
820 RTE_FLOW_ERROR_TYPE_ITEM,
826 input_set |= I40E_INSET_SRC_PORT;
827 input_set |= I40E_INSET_DST_PORT;
829 /* Get filter info */
830 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
832 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
833 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
835 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
837 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
838 filter->input.flow.udp4_flow.src_port =
839 udp_spec->hdr.src_port;
840 filter->input.flow.udp4_flow.dst_port =
841 udp_spec->hdr.dst_port;
842 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
843 filter->input.flow.udp6_flow.src_port =
844 udp_spec->hdr.src_port;
845 filter->input.flow.udp6_flow.dst_port =
846 udp_spec->hdr.dst_port;
849 case RTE_FLOW_ITEM_TYPE_SCTP:
851 (const struct rte_flow_item_sctp *)item->spec;
853 (const struct rte_flow_item_sctp *)item->mask;
854 if (!sctp_spec || !sctp_mask) {
855 rte_flow_error_set(error, EINVAL,
856 RTE_FLOW_ERROR_TYPE_ITEM,
858 "NULL SCTP spec/mask");
862 /* Check SCTP mask and update input set */
863 if (sctp_mask->hdr.cksum) {
864 rte_flow_error_set(error, EINVAL,
865 RTE_FLOW_ERROR_TYPE_ITEM,
871 if (sctp_mask->hdr.src_port != UINT16_MAX ||
872 sctp_mask->hdr.dst_port != UINT16_MAX ||
873 sctp_mask->hdr.tag != UINT32_MAX) {
874 rte_flow_error_set(error, EINVAL,
875 RTE_FLOW_ERROR_TYPE_ITEM,
880 input_set |= I40E_INSET_SRC_PORT;
881 input_set |= I40E_INSET_DST_PORT;
882 input_set |= I40E_INSET_SCTP_VT;
884 /* Get filter info */
885 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
886 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
887 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
888 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
890 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
891 filter->input.flow.sctp4_flow.src_port =
892 sctp_spec->hdr.src_port;
893 filter->input.flow.sctp4_flow.dst_port =
894 sctp_spec->hdr.dst_port;
895 filter->input.flow.sctp4_flow.verify_tag =
897 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
898 filter->input.flow.sctp6_flow.src_port =
899 sctp_spec->hdr.src_port;
900 filter->input.flow.sctp6_flow.dst_port =
901 sctp_spec->hdr.dst_port;
902 filter->input.flow.sctp6_flow.verify_tag =
906 case RTE_FLOW_ITEM_TYPE_VF:
907 vf_spec = (const struct rte_flow_item_vf *)item->spec;
908 filter->input.flow_ext.is_vf = 1;
909 filter->input.flow_ext.dst_id = vf_spec->id;
910 if (filter->input.flow_ext.is_vf &&
911 filter->input.flow_ext.dst_id >= pf->vf_num) {
912 rte_flow_error_set(error, EINVAL,
913 RTE_FLOW_ERROR_TYPE_ITEM,
915 "Invalid VF ID for FDIR.");
924 pctype = i40e_flowtype_to_pctype(flow_type);
925 if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
926 rte_flow_error_set(error, EINVAL,
927 RTE_FLOW_ERROR_TYPE_ITEM, item,
928 "Unsupported flow type");
932 if (input_set != i40e_get_default_input_set(pctype)) {
933 rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ITEM, item,
935 "Invalid input set.");
938 filter->input.flow_type = flow_type;
943 /* Parse to get the action info of a FDIR filter.
944 * FDIR action supports QUEUE or (QUEUE + MARK).
947 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
948 const struct rte_flow_action *actions,
949 struct rte_flow_error *error,
950 struct rte_eth_fdir_filter *filter)
952 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
953 const struct rte_flow_action *act;
954 const struct rte_flow_action_queue *act_q;
955 const struct rte_flow_action_mark *mark_spec;
958 /* Check if the first non-void action is QUEUE or DROP. */
959 NEXT_ITEM_OF_ACTION(act, actions, index);
960 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
961 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
962 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
963 act, "Invalid action.");
967 act_q = (const struct rte_flow_action_queue *)act->conf;
968 filter->action.flex_off = 0;
969 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
970 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
972 filter->action.behavior = RTE_ETH_FDIR_REJECT;
974 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
975 filter->action.rx_queue = act_q->index;
977 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
978 rte_flow_error_set(error, EINVAL,
979 RTE_FLOW_ERROR_TYPE_ACTION, act,
980 "Invalid queue ID for FDIR.");
984 /* Check if the next non-void item is MARK or END. */
986 NEXT_ITEM_OF_ACTION(act, actions, index);
987 if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
988 act->type != RTE_FLOW_ACTION_TYPE_END) {
989 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
990 act, "Invalid action.");
994 if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
995 mark_spec = (const struct rte_flow_action_mark *)act->conf;
996 filter->soft_id = mark_spec->id;
998 /* Check if the next non-void item is END */
1000 NEXT_ITEM_OF_ACTION(act, actions, index);
1001 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1002 rte_flow_error_set(error, EINVAL,
1003 RTE_FLOW_ERROR_TYPE_ACTION,
1004 act, "Invalid action.");
1013 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1014 const struct rte_flow_attr *attr,
1015 const struct rte_flow_item pattern[],
1016 const struct rte_flow_action actions[],
1017 struct rte_flow_error *error,
1018 union i40e_filter_t *filter)
1020 struct rte_eth_fdir_filter *fdir_filter =
1021 &filter->fdir_filter;
1024 ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1028 ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1032 ret = i40e_flow_parse_attr(attr, error);
1036 if (dev->data->dev_conf.fdir_conf.mode !=
1037 RTE_FDIR_MODE_PERFECT) {
1038 rte_flow_error_set(error, ENOTSUP,
1039 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1041 "Check the mode in fdir_conf.");
1049 i40e_flow_validate(struct rte_eth_dev *dev,
1050 const struct rte_flow_attr *attr,
1051 const struct rte_flow_item pattern[],
1052 const struct rte_flow_action actions[],
1053 struct rte_flow_error *error)
1055 struct rte_flow_item *items; /* internal pattern w/o VOID items */
1056 parse_filter_t parse_filter;
1057 uint32_t item_num = 0; /* non-void item number of pattern*/
1062 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1063 NULL, "NULL pattern.");
1068 rte_flow_error_set(error, EINVAL,
1069 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
1070 NULL, "NULL action.");
1075 rte_flow_error_set(error, EINVAL,
1076 RTE_FLOW_ERROR_TYPE_ATTR,
1077 NULL, "NULL attribute.");
1081 memset(&cons_filter, 0, sizeof(cons_filter));
1083 /* Get the non-void item number of pattern */
1084 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
1085 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
1091 items = rte_zmalloc("i40e_pattern",
1092 item_num * sizeof(struct rte_flow_item), 0);
1094 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
1095 NULL, "No memory for PMD internal items.");
1099 i40e_pattern_skip_void_item(items, pattern);
1101 /* Find if there's matched parse filter function */
1102 parse_filter = i40e_find_parse_filter_func(items);
1103 if (!parse_filter) {
1104 rte_flow_error_set(error, EINVAL,
1105 RTE_FLOW_ERROR_TYPE_ITEM,
1106 pattern, "Unsupported pattern");
1110 ret = parse_filter(dev, attr, items, actions, error, &cons_filter);