/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT	4
#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER	44
#define I40E_TENANT_ARRAY_NUM	3
#define I40E_TCI_MASK		0xFFFF

static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);

const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};
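
/*
 * Note: applications do not reference i40e_flow_ops directly. In this
 * DPDK generation the generic rte_flow layer obtains the ops through the
 * driver's filter_ctrl callback with RTE_ETH_FILTER_GENERIC, then
 * dispatches rte_flow_validate()/create()/destroy()/flush() to the
 * callbacks registered above.
 */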

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR */
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
};
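
/*
 * Illustrative note: pattern lookup is a strict item-by-item comparison
 * (see i40e_match_pattern() below). For example, a user pattern of
 * ETH / IPV4 / UDP / END, once VOID items are stripped, compares equal
 * to pattern_fdir_ipv4_udp_ext above and is therefore handed to
 * i40e_flow_parse_fdir_filter().
 */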

#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
	do {								\
		act = actions + index;					\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
			index++;					\
			act = actions + index;				\
		}							\
	} while (0)
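
/*
 * Example (hypothetical action array): given actions = { VOID, QUEUE,
 * END } and index = 0, NEXT_ITEM_OF_ACTION leaves act pointing at the
 * QUEUE action with index = 1; VOID actions are transparently skipped.
 */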

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = i40e_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
		   struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find if there's parse filter function matched */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
		if (i40e_match_pattern(i40e_supported_patterns[i].items,
				       pattern)) {
			parse_filter = i40e_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
	uint64_t reg_r = 0;
	uint16_t reg_id;
	uint16_t tpid;

	if (qinq)
		reg_id = 2;
	else
		reg_id = 3;

	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
				    &reg_r, NULL);

	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

	return tpid;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for"
					   " ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
				 const struct rte_flow_item pattern[],
				 const struct rte_flow_action actions[],
				 struct rte_flow_error *error,
				 union i40e_filter_t *filter)
{
	struct rte_eth_ethertype_filter *ethertype_filter =
		&filter->ethertype_filter;
	int ret;

	ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
						ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_ethertype_action(dev, actions, error,
					       ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

	return ret;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_vf *vf_spec;
	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
	enum i40e_filter_pctype pctype;
	uint64_t input_set = I40E_INSET_NONE;
	uint16_t flag_offset;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	uint32_t j;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			if (eth_spec || eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH spec/mask");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;
			if (!ipv4_spec || !ipv4_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL IPv4 spec/mask");
				return -rte_errno;
			}

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
				input_set |= I40E_INSET_IPV4_SRC;
			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
				input_set |= I40E_INSET_IPV4_DST;
			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_TOS;
			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_TTL;
			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_PROTO;

			/* Get filter info */
			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
			/* Check if it is fragment. */
			flag_offset =
			      rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
			if (flag_offset & IPV4_HDR_OFFSET_MASK ||
			    flag_offset & IPV4_HDR_MF_FLAG)
				flow_type = RTE_ETH_FLOW_FRAG_IPV4;

			/* Get the filter info */
			filter->input.flow.ip4_flow.proto =
				ipv4_spec->hdr.next_proto_id;
			filter->input.flow.ip4_flow.tos =
				ipv4_spec->hdr.type_of_service;
			filter->input.flow.ip4_flow.ttl =
				ipv4_spec->hdr.time_to_live;
			filter->input.flow.ip4_flow.src_ip =
				ipv4_spec->hdr.src_addr;
			filter->input.flow.ip4_flow.dst_ip =
				ipv4_spec->hdr.dst_addr;

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;
			if (!ipv6_spec || !ipv6_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL IPv6 spec/mask");
				return -rte_errno;
			}

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return -rte_errno;
			}

			/* SRC and DST address of IPv6 shouldn't be masked */
			for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
				if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
				    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}
			}

			input_set |= I40E_INSET_IPV6_SRC;
			input_set |= I40E_INSET_IPV6_DST;

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
			    == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
				input_set |= I40E_INSET_IPV6_TC;
			if (ipv6_mask->hdr.proto == UINT8_MAX)
				input_set |= I40E_INSET_IPV6_NEXT_HDR;
			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
				input_set |= I40E_INSET_IPV6_HOP_LIMIT;

			/* Get filter info */
			filter->input.flow.ipv6_flow.tc =
				(uint8_t)(ipv6_spec->hdr.vtc_flow <<
					  I40E_IPV4_TC_SHIFT);
			filter->input.flow.ipv6_flow.proto =
				ipv6_spec->hdr.proto;
			filter->input.flow.ipv6_flow.hop_limits =
				ipv6_spec->hdr.hop_limits;

			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);

			/* Check if it is fragment. */
			if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
				flow_type = RTE_ETH_FLOW_FRAG_IPV6;
			else
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
			if (!tcp_spec || !tcp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL TCP spec/mask");
				return -rte_errno;
			}

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			if (tcp_mask->hdr.src_port != UINT16_MAX ||
			    tcp_mask->hdr.dst_port != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.tcp4_flow.src_port =
					tcp_spec->hdr.src_port;
				filter->input.flow.tcp4_flow.dst_port =
					tcp_spec->hdr.dst_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.tcp6_flow.src_port =
					tcp_spec->hdr.src_port;
				filter->input.flow.tcp6_flow.dst_port =
					tcp_spec->hdr.dst_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;
			if (!udp_spec || !udp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL UDP spec/mask");
				return -rte_errno;
			}

			/* Check UDP mask and update input set */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			if (udp_mask->hdr.src_port != UINT16_MAX ||
			    udp_mask->hdr.dst_port != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type =
					RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type =
					RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.udp4_flow.src_port =
					udp_spec->hdr.src_port;
				filter->input.flow.udp4_flow.dst_port =
					udp_spec->hdr.dst_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.udp6_flow.src_port =
					udp_spec->hdr.src_port;
				filter->input.flow.udp6_flow.dst_port =
					udp_spec->hdr.dst_port;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (!sctp_spec || !sctp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL SCTP spec/mask");
				return -rte_errno;
			}

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}

			if (sctp_mask->hdr.src_port != UINT16_MAX ||
			    sctp_mask->hdr.dst_port != UINT16_MAX ||
			    sctp_mask->hdr.tag != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}
			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;
			input_set |= I40E_INSET_SCTP_VT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.sctp4_flow.src_port =
					sctp_spec->hdr.src_port;
				filter->input.flow.sctp4_flow.dst_port =
					sctp_spec->hdr.dst_port;
				filter->input.flow.sctp4_flow.verify_tag =
					sctp_spec->hdr.tag;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.sctp6_flow.src_port =
					sctp_spec->hdr.src_port;
				filter->input.flow.sctp6_flow.dst_port =
					sctp_spec->hdr.dst_port;
				filter->input.flow.sctp6_flow.verify_tag =
					sctp_spec->hdr.tag;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = (const struct rte_flow_item_vf *)item->spec;
			filter->input.flow_ext.is_vf = 1;
			filter->input.flow_ext.dst_id = vf_spec->id;
			if (filter->input.flow_ext.is_vf &&
			    filter->input.flow_ext.dst_id >= pf->vf_num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VF ID for FDIR.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	pctype = i40e_flowtype_to_pctype(flow_type);
	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported flow type");
		return -rte_errno;
	}

	if (input_set != i40e_get_default_input_set(pctype)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Invalid input set.");
		return -rte_errno;
	}
	filter->input.flow_type = flow_type;

	return 0;
}
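
/*
 * Illustrative sketch (hypothetical application variables): a flow
 * director rule for IPv4/UDP must fully mask exactly the fields of the
 * default input set (see default_inset_table in i40e_ethdev.c), e.g.:
 *
 *	struct rte_flow_item_ipv4 ip_mask;
 *	struct rte_flow_item_udp udp_mask;
 *
 *	memset(&ip_mask, 0, sizeof(ip_mask));
 *	ip_mask.hdr.src_addr = UINT32_MAX;
 *	ip_mask.hdr.dst_addr = UINT32_MAX;
 *	memset(&udp_mask, 0, sizeof(udp_mask));
 *	udp_mask.hdr.src_port = UINT16_MAX;
 *	udp_mask.hdr.dst_port = UINT16_MAX;
 *
 * Partially masked ports or extra masked header fields are rejected by
 * the checks above.
 */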

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	act_q = (const struct rte_flow_action_queue *)act->conf;
	filter->action.flex_off = 0;
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE)
		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
	else
		filter->action.behavior = RTE_ETH_FDIR_REJECT;

	filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
	filter->action.rx_queue = act_q->index;

	if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid queue ID for FDIR.");
		return -rte_errno;
	}

	/* Check if the next non-void item is MARK or END. */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_MARK &&
	    act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_MARK) {
		mark_spec = (const struct rte_flow_action_mark *)act->conf;
		filter->soft_id = mark_spec->id;

		/* Check if the next non-void item is END */
		index++;
		NEXT_ITEM_OF_ACTION(act, actions, index);
		if (act->type != RTE_FLOW_ACTION_TYPE_END) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid action.");
			return -rte_errno;
		}
	}

	return 0;
}
static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct rte_eth_fdir_filter *fdir_filter =
		&filter->fdir_filter;
	int ret;

	ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_FDIR;

	if (dev->data->dev_conf.fdir_conf.mode !=
	    RTE_FDIR_MODE_PERFECT) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Check the mode in fdir_conf.");
		return -rte_errno;
	}

	return 0;
}

/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports PF, VF and QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
			      const struct rte_flow_action *actions,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	uint32_t index = 0;

	/* Check if the first non-void action is PF or VF. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->vf_id = act_vf->id;
		filter->is_to_vf = 1;
		if (filter->vf_id >= pf->vf_num) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid VF ID for tunnel filter");
			return -rte_errno;
		}
	}

	/* Check if the next non-void item is QUEUE */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue_id = act_q->index;
		if (!filter->is_to_vf)
			if (filter->queue_id >= pf->dev_data->nb_rx_queues) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ACTION,
						   act, "Invalid queue ID for tunnel filter");
				return -rte_errno;
			}
		index++;
	}

	/* Check if the next non-void item is END */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
i40e_check_tenant_id_mask(const uint8_t *mask)
{
	int is_masked = 0;
	uint16_t j;

	for (j = 0; j < I40E_TENANT_ARRAY_NUM; j++) {
		if (*(mask + j) == UINT8_MAX) {
			if (j > 0 && (*(mask + j) != *(mask + j - 1)))
				return -EINVAL;
			is_masked = 0;
		} else if (*(mask + j) == 0) {
			if (j > 0 && (*(mask + j) != *(mask + j - 1)))
				return -EINVAL;
			is_masked = 1;
		} else {
			return -EINVAL;
		}
	}

	return is_masked;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_eth *o_eth_spec = NULL;
	const struct rte_flow_item_eth *o_eth_mask = NULL;
	const struct rte_flow_item_vxlan *vxlan_spec = NULL;
	const struct rte_flow_item_vxlan *vxlan_mask = NULL;
	const struct rte_flow_item_eth *i_eth_spec = NULL;
	const struct rte_flow_item_eth *i_eth_mask = NULL;
	const struct rte_flow_item_vlan *vlan_spec = NULL;
	const struct rte_flow_item_vlan *vlan_mask = NULL;
	int is_vni_masked = 0;
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				if (!vxlan_flag)
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
				else
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
			}

			if (!vxlan_flag) {
				o_eth_spec = eth_spec;
				o_eth_mask = eth_mask;
			} else {
				i_eth_spec = eth_spec;
				i_eth_mask = eth_mask;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vxlan_flag) {
				if (!(vlan_spec && vlan_mask)) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
					return -rte_errno;
				}
			} else {
				if (vlan_spec || vlan_mask) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
					return -rte_errno;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = RTE_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = RTE_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, either spec or mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_mask) {
				is_vni_masked =
					i40e_check_tenant_id_mask(
							vxlan_mask->vni);
				if (is_vni_masked < 0) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}
			}
			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	/* Check specification and mask to get the filter type */
	if (vlan_spec && vlan_mask &&
	    (vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
		/* If there's inner vlan */
		filter->inner_vlan = rte_be_to_cpu_16(vlan_spec->tci)
			& I40E_TCI_MASK;
		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
			/* If there's vxlan */
			rte_memcpy(((uint8_t *)&tenant_id_be + 1),
				   vxlan_spec->vni, 3);
			filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
			if (!o_eth_spec && !o_eth_mask &&
			    i_eth_spec && i_eth_mask)
				filter->filter_type =
					RTE_TUNNEL_FILTER_IMAC_IVLAN_TENID;
			else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   NULL,
						   "Invalid filter type");
				return -rte_errno;
			}
		} else if (!vxlan_spec && !vxlan_mask) {
			/* If there's no vxlan */
			if (!o_eth_spec && !o_eth_mask &&
			    i_eth_spec && i_eth_mask)
				filter->filter_type =
					RTE_TUNNEL_FILTER_IMAC_IVLAN;
			else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   NULL,
						   "Invalid filter type");
				return -rte_errno;
			}
		} else {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   NULL,
					   "Invalid filter type");
			return -rte_errno;
		}
	} else if ((!vlan_spec && !vlan_mask) ||
		   (vlan_spec && vlan_mask && vlan_mask->tci == 0x0)) {
		/* If there's no inner vlan */
		if (vxlan_spec && vxlan_mask && !is_vni_masked) {
			/* If there's vxlan */
			rte_memcpy(((uint8_t *)&tenant_id_be + 1),
				   vxlan_spec->vni, 3);
			filter->tenant_id = rte_be_to_cpu_32(tenant_id_be);
			if (!o_eth_spec && !o_eth_mask &&
			    i_eth_spec && i_eth_mask)
				filter->filter_type =
					RTE_TUNNEL_FILTER_IMAC_TENID;
			else if (o_eth_spec && o_eth_mask &&
				 i_eth_spec && i_eth_mask)
				filter->filter_type =
					RTE_TUNNEL_FILTER_OMAC_TENID_IMAC;
		} else if (!vxlan_spec && !vxlan_mask) {
			/* If there's no vxlan */
			if (!o_eth_spec && !o_eth_mask &&
			    i_eth_spec && i_eth_mask) {
				filter->filter_type = ETH_TUNNEL_FILTER_IMAC;
			} else {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
					   "Invalid filter type");
				return -rte_errno;
			}
		} else {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Invalid filter type");
			return -rte_errno;
		}
	} else {
		rte_flow_error_set(error, EINVAL,
			   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
			   "Not supported by tunnel filter.");
		return -rte_errno;
	}

	filter->tunnel_type = RTE_TUNNEL_TYPE_VXLAN;

	return 0;
}
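
/*
 * Illustrative note: an IMAC_TENID VXLAN rule corresponds to a pattern
 * ETH / IPV4 / UDP / VXLAN / ETH / END where the outer ETH, IPV4 and
 * UDP items carry NULL spec/mask (they only describe the protocol
 * stack), the VXLAN item matches the 3-byte VNI with a full mask, and
 * the inner ETH item fully masks the destination MAC.
 */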
static int
i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
					    error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}

static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern */
	uint32_t i = 0;
	int ret;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -rte_errno;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* Find if there's matched parse filter function */
	parse_filter = i40e_find_parse_filter_func(items);
	if (!parse_filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern, "Unsupported pattern");
		rte_free(items);
		return -rte_errno;
	}

	ret = parse_filter(dev, attr, items, actions, error, &cons_filter);

	rte_free(items);

	return ret;
}

static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow;
	int ret;

	flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0)
		goto free_flow;

	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
					&cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
			    &cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	default:
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

static int
i40e_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_filter_type filter_type = flow->filter_type;
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_flow_destroy_ethertype_filter(pf,
			 (struct i40e_ethertype_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_flow_destroy_tunnel_filter(pf,
			      (struct i40e_tunnel_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
		       &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");

	return ret;
}

static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				    filter->input.mac_addr.addr_bytes,
				    filter->input.ether_type,
				    flags, pf->main_vsi->seid,
				    filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return ret;

	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}

static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi = pf->main_vsi;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	int ret = 0;

	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
					   &cld_filter.element, 1);
	if (ret < 0)
		return ret;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}

/* Flush all flows */
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	ret = i40e_flow_flush_fdir_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush FDIR flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_ethertype_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush ethertype flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_tunnel_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to flush tunnel flows.");
		return -rte_errno;
	}

	return ret;
}

/* Flush all flow director filters */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct rte_flow *flow;
	void *temp;
	int ret;

	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}

	return ret;
}

/* Flush all ethertype filters */
static int
i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
{
	struct i40e_ethertype_filter_list
		*ethertype_list = &pf->ethertype.ethertype_list;
	struct i40e_ethertype_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(ethertype_list))) {
		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete ethertype flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}

/* Flush all tunnel filters */
static int
i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(tunnel_list))) {
		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete tunnel flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}