/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"

#define I40E_IPV4_TC_SHIFT	4
#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER	44
#define I40E_TENANT_ARRAY_NUM	3
#define I40E_TCI_MASK		0xFFFF
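
/*
 * For reference: the IPv6 vtc_flow field packs version (4 bits),
 * traffic class (8 bits) and flow label (20 bits); 0x00FF << 4 =
 * 0x0FF0 therefore covers the traffic-class bits that fall within
 * the upper 16 bits of vtc_flow, which is how I40E_IPV6_TC_MASK is
 * applied against the IPv6 mask below.
 */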

static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter);

const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};
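
/*
 * Illustrative call path (application side, not part of this file):
 * these callbacks are reached through the generic rte_flow API, e.g.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (!rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *		flow = rte_flow_create(port_id, &attr, pattern,
 *				       actions, &err);
 */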

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;

/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR - support default flow type without flexible payload */
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
	/* NVGRE */
	{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
	/* MPLSoUDP & MPLSoGRE */
	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
	/* QINQ */
	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
};
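
/*
 * Illustrative match (assumed user input): after VOID items are
 * stripped, a pattern of ETH / IPV4 / UDP / END is compared item by
 * item against the arrays above, resolves to pattern_fdir_ipv4_udp
 * and is handed to i40e_flow_parse_fdir_filter().
 */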

#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
	do {								\
		act = actions + index;					\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
			index++;					\
			act = actions + index;				\
		}							\
	} while (0)
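
/*
 * Hypothetical usage: with actions = { VOID, QUEUE, END } and
 * index = 0, NEXT_ITEM_OF_ACTION(act, actions, index) leaves act
 * pointing at the QUEUE action and index equal to 1.
 */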

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = i40e_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
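
/*
 * Worked example (assumed input): a pattern of
 * ETH / VOID / IPV4 / VOID / UDP / END is compacted into
 * ETH / IPV4 / UDP / END, which can then be compared against
 * i40e_supported_patterns[].
 */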

/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
		   struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

/* Find if there's parse filter function matched */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
		if (i40e_match_pattern(i40e_supported_patterns[i].items,
				       pattern)) {
			parse_filter = i40e_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}

/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
	uint64_t reg_r = 0;
	uint16_t reg_id;
	uint16_t tpid;

	if (qinq)
		reg_id = 2;
	else
		reg_id = 3;

	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
				    &reg_r, NULL);

	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

	return tpid;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
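
/*
 * Illustrative item that satisfies rules 1-5 above (application
 * side): match ethertype 0x8847 on any destination MAC, e.g.
 *
 *	struct rte_flow_item_eth spec = {
 *		.type = rte_cpu_to_be_16(0x8847),
 *	};
 *	struct rte_flow_item_eth mask = {
 *		.type = rte_cpu_to_be_16(0xFFFF),
 *	};
 */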

static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in"
						   " control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for"
					   " ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
				 const struct rte_flow_item pattern[],
				 const struct rte_flow_action actions[],
				 struct rte_flow_error *error,
				 union i40e_filter_t *filter)
{
	struct rte_eth_ethertype_filter *ethertype_filter =
		&filter->ethertype_filter;
	int ret;

	ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
						ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_ethertype_action(dev, actions, error,
					       ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

	return ret;
}

static int
i40e_flow_check_raw_item(const struct rte_flow_item *item,
			 const struct rte_flow_item_raw *raw_spec,
			 struct rte_flow_error *error)
{
	if (!raw_spec->relative) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Relative should be 1.");
		return -rte_errno;
	}

	if (raw_spec->offset % sizeof(uint16_t)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be even.");
		return -rte_errno;
	}

	if (raw_spec->search || raw_spec->limit) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "search or limit is not supported.");
		return -rte_errno;
	}

	if (raw_spec->offset < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be non-negative.");
		return -rte_errno;
	}
	return 0;
}

static int
i40e_flow_store_flex_pit(struct i40e_pf *pf,
			 struct i40e_fdir_flex_pit *flex_pit,
			 enum i40e_flxpld_layer_idx layer_idx,
			 uint8_t raw_id)
{
	uint8_t field_idx;

	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
	/* Check if the configuration is conflicted */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
		return -1;

	/* Check if the configuration exists. */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
		return 1;

	pf->fdir.flex_set[field_idx].src_offset =
		flex_pit->src_offset;
	pf->fdir.flex_set[field_idx].size =
		flex_pit->size;
	pf->fdir.flex_set[field_idx].dst_offset =
		flex_pit->dst_offset;

	return 0;
}

static int
i40e_flow_store_flex_mask(struct i40e_pf *pf,
			  enum i40e_filter_pctype pctype,
			  uint8_t *mask)
{
	struct i40e_fdir_flex_mask flex_mask;
	uint16_t mask_tmp = 0;
	uint8_t i, nb_bitmask = 0;

	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
		if (mask_tmp) {
			flex_mask.word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask.bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
					return -1;
			}
		}
	}
	flex_mask.nb_bitmask = nb_bitmask;

	if (pf->fdir.flex_mask_flag[pctype] &&
	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
		    sizeof(struct i40e_fdir_flex_mask))))
		return -2;
	else if (pf->fdir.flex_mask_flag[pctype] &&
		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
			  sizeof(struct i40e_fdir_flex_mask))))
		return 1;

	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
	       sizeof(struct i40e_fdir_flex_mask));

	return 0;
}

static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
			    enum i40e_flxpld_layer_idx layer_idx,
			    uint8_t raw_id)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t flx_pit;
	uint8_t field_idx;
	uint16_t min_next_off = 0;  /* in words */
	uint8_t i;

	/* Set flex pit */
	for (i = 0; i < raw_id; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				     pf->fdir.flex_set[field_idx].size,
				     pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
			pf->fdir.flex_set[field_idx].size;
	}

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off++;
	}

	pf->fdir.flex_pit_flag[layer_idx] = 1;
}

static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
			    enum i40e_filter_pctype pctype)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint8_t i;

	/* Set flex mask */
	flex_mask = &pf->fdir.flex_mask[pctype];
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < flex_mask->nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}

	pf->fdir.flex_mask_flag[pctype] = 1;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported patterns: refer to array i40e_supported_patterns.
 * 3. Supported flow type and input set: refer to array
 *    default_inset_table in i40e_ethdev.c.
 * 4. Mask of fields which need to be matched should be
 *    filled with 1.
 * 5. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
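
/*
 * Illustrative FDIR pattern obeying rules 1-5 above (application
 * side): ETH (empty placeholder) / IPV4 / UDP / END, with the mask
 * of every field that must match set to all ones so that the
 * collected input set equals the default input set of the resulting
 * flow type.
 */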

static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_raw *raw_spec, *raw_mask;
	const struct rte_flow_item_vf *vf_spec;

	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
	enum i40e_filter_pctype pctype;
	uint64_t input_set = I40E_INSET_NONE;
	uint16_t flag_offset;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	uint32_t i, j;
	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
	uint8_t raw_id = 0;
	int32_t off_arr[I40E_MAX_FLXPLD_FIED];
	uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
	struct i40e_fdir_flex_pit flex_pit;
	uint8_t next_dst_off = 0;
	uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t flex_size;
	bool cfg_flex_pit = true;
	bool cfg_flex_msk = true;
	int ret;

	memset(off_arr, 0, sizeof(off_arr));
	memset(len_arr, 0, sizeof(len_arr));
	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			if (eth_spec || eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH spec/mask");
				return -rte_errno;
			}

			layer_idx = I40E_FLXPLD_L2_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;
			if (!ipv4_spec || !ipv4_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL IPv4 spec/mask");
				return -rte_errno;
			}

			/* Check IPv4 mask and update input set */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}

			if (ipv4_mask->hdr.src_addr == UINT32_MAX)
				input_set |= I40E_INSET_IPV4_SRC;
			if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
				input_set |= I40E_INSET_IPV4_DST;
			if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_TOS;
			if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_TTL;
			if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
				input_set |= I40E_INSET_IPV4_PROTO;

			/* Get filter info */
			flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
			/* Check if it is fragment. */
			flag_offset =
				rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
			if (flag_offset & IPV4_HDR_OFFSET_MASK ||
			    flag_offset & IPV4_HDR_MF_FLAG)
				flow_type = RTE_ETH_FLOW_FRAG_IPV4;

			/* Get the filter info */
			filter->input.flow.ip4_flow.proto =
				ipv4_spec->hdr.next_proto_id;
			filter->input.flow.ip4_flow.tos =
				ipv4_spec->hdr.type_of_service;
			filter->input.flow.ip4_flow.ttl =
				ipv4_spec->hdr.time_to_live;
			filter->input.flow.ip4_flow.src_ip =
				ipv4_spec->hdr.src_addr;
			filter->input.flow.ip4_flow.dst_ip =
				ipv4_spec->hdr.dst_addr;

			layer_idx = I40E_FLXPLD_L3_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;
			if (!ipv6_spec || !ipv6_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL IPv6 spec/mask");
				return -rte_errno;
			}

			/* Check IPv6 mask and update input set */
			if (ipv6_mask->hdr.payload_len) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
				return -rte_errno;
			}

			/* SRC and DST address of IPv6 shouldn't be masked */
			for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
				if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
				    ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}
			}

			input_set |= I40E_INSET_IPV6_SRC;
			input_set |= I40E_INSET_IPV6_DST;

			if ((ipv6_mask->hdr.vtc_flow &
			     rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
			    == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
				input_set |= I40E_INSET_IPV6_TC;
			if (ipv6_mask->hdr.proto == UINT8_MAX)
				input_set |= I40E_INSET_IPV6_NEXT_HDR;
			if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
				input_set |= I40E_INSET_IPV6_HOP_LIMIT;

			/* Get filter info */
			filter->input.flow.ipv6_flow.tc =
				(uint8_t)(ipv6_spec->hdr.vtc_flow <<
					  I40E_IPV4_TC_SHIFT);
			filter->input.flow.ipv6_flow.proto =
				ipv6_spec->hdr.proto;
			filter->input.flow.ipv6_flow.hop_limits =
				ipv6_spec->hdr.hop_limits;

			rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
				   ipv6_spec->hdr.dst_addr, 16);

			/* Check if it is fragment. */
			if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
				flow_type = RTE_ETH_FLOW_FRAG_IPV6;
			else
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;

			layer_idx = I40E_FLXPLD_L3_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
			if (!tcp_spec || !tcp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL TCP spec/mask");
				return -rte_errno;
			}

			/* Check TCP mask and update input set */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			if (tcp_mask->hdr.src_port != UINT16_MAX ||
			    tcp_mask->hdr.dst_port != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}

			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.tcp4_flow.src_port =
					tcp_spec->hdr.src_port;
				filter->input.flow.tcp4_flow.dst_port =
					tcp_spec->hdr.dst_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.tcp6_flow.src_port =
					tcp_spec->hdr.src_port;
				filter->input.flow.tcp6_flow.dst_port =
					tcp_spec->hdr.dst_port;
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;
			if (!udp_spec || !udp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL UDP spec/mask");
				return -rte_errno;
			}

			/* Check UDP mask and update input set*/
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			if (udp_mask->hdr.src_port != UINT16_MAX ||
			    udp_mask->hdr.dst_port != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type =
					RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type =
					RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.udp4_flow.src_port =
					udp_spec->hdr.src_port;
				filter->input.flow.udp4_flow.dst_port =
					udp_spec->hdr.dst_port;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.udp6_flow.src_port =
					udp_spec->hdr.src_port;
				filter->input.flow.udp6_flow.dst_port =
					udp_spec->hdr.dst_port;
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;
			if (!sctp_spec || !sctp_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL SCTP spec/mask");
				return -rte_errno;
			}

			/* Check SCTP mask and update input set */
			if (sctp_mask->hdr.cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}

			if (sctp_mask->hdr.src_port != UINT16_MAX ||
			    sctp_mask->hdr.dst_port != UINT16_MAX ||
			    sctp_mask->hdr.tag != UINT32_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
				return -rte_errno;
			}
			input_set |= I40E_INSET_SRC_PORT;
			input_set |= I40E_INSET_DST_PORT;
			input_set |= I40E_INSET_SCTP_VT;

			/* Get filter info */
			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
			else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

			if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
				filter->input.flow.sctp4_flow.src_port =
					sctp_spec->hdr.src_port;
				filter->input.flow.sctp4_flow.dst_port =
					sctp_spec->hdr.dst_port;
				filter->input.flow.sctp4_flow.verify_tag =
					sctp_spec->hdr.tag;
			} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
				filter->input.flow.sctp6_flow.src_port =
					sctp_spec->hdr.src_port;
				filter->input.flow.sctp6_flow.dst_port =
					sctp_spec->hdr.dst_port;
				filter->input.flow.sctp6_flow.verify_tag =
					sctp_spec->hdr.tag;
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_RAW:
			raw_spec = (const struct rte_flow_item_raw *)item->spec;
			raw_mask = (const struct rte_flow_item_raw *)item->mask;

			if (!raw_spec || !raw_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL RAW spec/mask");
				return -rte_errno;
			}

			ret = i40e_flow_check_raw_item(item, raw_spec, error);
			if (ret < 0)
				return ret;

			off_arr[raw_id] = raw_spec->offset;
			len_arr[raw_id] = raw_spec->length;

			flex_size = 0;
			memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
			flex_pit.size =
				raw_spec->length / sizeof(uint16_t);
			flex_pit.dst_offset =
				next_dst_off / sizeof(uint16_t);

			for (i = 0; i <= raw_id; i++) {
				if (i == raw_id)
					flex_pit.src_offset +=
						off_arr[i] /
						sizeof(uint16_t);
				else
					flex_pit.src_offset +=
						(off_arr[i] + len_arr[i]) /
						sizeof(uint16_t);
				flex_size += len_arr[i];
			}
			if (((flex_pit.src_offset + flex_pit.size) >=
			     I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
				flex_size > I40E_FDIR_MAX_FLEXLEN) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Exceeds maximal payload limit.");
				return -rte_errno;
			}

			/* Store flex pit to SW */
			ret = i40e_flow_store_flex_pit(pf, &flex_pit,
						       layer_idx, raw_id);
			if (ret < 0) {
				rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Conflict with the first flexible rule.");
				return -rte_errno;
			} else if (ret > 0)
				cfg_flex_pit = false;

			for (i = 0; i < raw_spec->length; i++) {
				j = i + next_dst_off;
				filter->input.flow_ext.flexbytes[j] =
					raw_spec->pattern[i];
				flex_mask[j] = raw_mask->pattern[i];
			}

			next_dst_off += raw_spec->length;
			raw_id++;
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = (const struct rte_flow_item_vf *)item->spec;
			filter->input.flow_ext.is_vf = 1;
			filter->input.flow_ext.dst_id = vf_spec->id;
			if (filter->input.flow_ext.is_vf &&
			    filter->input.flow_ext.dst_id >= pf->vf_num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VF ID for FDIR.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	pctype = i40e_flowtype_to_pctype(flow_type);
	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported flow type");
		return -rte_errno;
	}

	if (input_set != i40e_get_default_input_set(pctype)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Invalid input set.");
		return -rte_errno;
	}
	filter->input.flow_type = flow_type;

	/* Store flex mask to SW */
	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
	if (ret == -1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Exceed maximal number of bitmasks");
		return -rte_errno;
	} else if (ret == -2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Conflict with the first flexible rule");
		return -rte_errno;
	} else if (ret > 0)
		cfg_flex_msk = false;

	if (cfg_flex_pit)
		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);

	if (cfg_flex_msk)
		i40e_flow_set_fdir_flex_msk(pf, pctype);

	return 0;
}

/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->action.rx_queue = act_q->index;
		if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID for FDIR.");
			return -rte_errno;
		}
		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		filter->action.behavior = RTE_ETH_FDIR_REJECT;
		break;
	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is MARK or FLAG or END. */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_MARK:
		mark_spec = (const struct rte_flow_action_mark *)act->conf;
		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
		filter->soft_id = mark_spec->id;
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	return 0;
}

static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct rte_eth_fdir_filter *fdir_filter =
		&filter->fdir_filter;
	int ret;

	ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_FDIR;

	if (dev->data->dev_conf.fdir_conf.mode !=
	    RTE_FDIR_MODE_PERFECT) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Check the mode in fdir_conf.");
		return -rte_errno;
	}

	return 0;
}

/* Parse to get the action info of a tunnel filter
 * Tunnel action only supports PF, VF and QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
			      const struct rte_flow_action *actions,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	uint32_t index = 0;

	/* Check if the first non-void action is PF or VF. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->vf_id = act_vf->id;
		filter->is_to_vf = 1;
		if (filter->vf_id >= pf->vf_num) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid VF ID for tunnel filter");
			return -rte_errno;
		}
	}

	/* Check if the next non-void item is QUEUE */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue_id = act_q->index;
		if ((!filter->is_to_vf) &&
		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid queue ID for tunnel filter");
			return -rte_errno;
		} else if (filter->is_to_vf &&
			   (filter->queue_id >= pf->vf_nb_qps)) {
			rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid queue ID for tunnel filter");
			return -rte_errno;
		}
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}

static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
};

static int
i40e_check_tunnel_filter_type(uint8_t filter_type)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
		if (filter_type == i40e_supported_tunnel_filter_types[i])
			return 0;
	}

	return -1;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
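
/*
 * Illustrative VXLAN rule obeying the rules above (application side):
 * outer ETH as placeholder (NULL spec/mask), IPV4 and UDP as protocol
 * placeholders, a VXLAN item with a fully masked VNI, then an inner
 * ETH whose destination MAC mask is all ones; this resolves to an
 * IMAC + TENID filter type.
 */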

static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
						rte_be_to_cpu_16(vlan_spec->tci) &
						I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}

static int
i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
					    error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */

static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
						rte_be_to_cpu_16(vlan_spec->tci) &
						I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}

static int
i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
					    error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}

/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MPLS label.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
2102 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
2103 const struct rte_flow_item *pattern,
2104 struct rte_flow_error *error,
2105 struct i40e_tunnel_filter_conf *filter)
2107 const struct rte_flow_item *item = pattern;
2108 const struct rte_flow_item_mpls *mpls_spec;
2109 const struct rte_flow_item_mpls *mpls_mask;
2110 enum rte_flow_item_type item_type;
2111 bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
2112 const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
2113 uint32_t label_be = 0;
2115 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2117 rte_flow_error_set(error, EINVAL,
2118 RTE_FLOW_ERROR_TYPE_ITEM,
2120 "Not support range");
2123 item_type = item->type;
2124 switch (item_type) {
2125 case RTE_FLOW_ITEM_TYPE_ETH:
2126 if (item->spec || item->mask) {
2127 rte_flow_error_set(error, EINVAL,
2128 RTE_FLOW_ERROR_TYPE_ITEM,
2130 "Invalid ETH item");
2134 case RTE_FLOW_ITEM_TYPE_IPV4:
2135 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
2136 /* IPv4 is used to describe protocol,
2137 * spec and mask should be NULL.
2139 if (item->spec || item->mask) {
2140 rte_flow_error_set(error, EINVAL,
2141 RTE_FLOW_ERROR_TYPE_ITEM,
2143 "Invalid IPv4 item");
2147 case RTE_FLOW_ITEM_TYPE_IPV6:
2148 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
2149 /* IPv6 is used to describe protocol,
2150 * spec and mask should be NULL.
2152 if (item->spec || item->mask) {
2153 rte_flow_error_set(error, EINVAL,
2154 RTE_FLOW_ERROR_TYPE_ITEM,
2156 "Invalid IPv6 item");
2160 case RTE_FLOW_ITEM_TYPE_UDP:
2161 /* UDP is used to describe protocol,
2162 * spec and mask should be NULL.
2164 if (item->spec || item->mask) {
2165 rte_flow_error_set(error, EINVAL,
2166 RTE_FLOW_ERROR_TYPE_ITEM,
2168 "Invalid UDP item");
2173 case RTE_FLOW_ITEM_TYPE_GRE:
2174 /* GRE is used to describe protocol,
2175 * spec and mask should be NULL.
2177 if (item->spec || item->mask) {
2178 rte_flow_error_set(error, EINVAL,
2179 RTE_FLOW_ERROR_TYPE_ITEM,
2181 "Invalid GRE item");
2185 case RTE_FLOW_ITEM_TYPE_MPLS:
2187 (const struct rte_flow_item_mpls *)item->spec;
2189 (const struct rte_flow_item_mpls *)item->mask;
2191 if (!mpls_spec || !mpls_mask) {
2192 rte_flow_error_set(error, EINVAL,
2193 RTE_FLOW_ERROR_TYPE_ITEM,
2195 "Invalid MPLS item");
2199 if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
2200 rte_flow_error_set(error, EINVAL,
2201 RTE_FLOW_ERROR_TYPE_ITEM,
2203 "Invalid MPLS label mask");
			rte_memcpy(((uint8_t *)&label_be + 1),
				   mpls_spec->label_tc_s, 3);
			filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
			break;
		default:
			break;
		}
	}
	if (is_mplsoudp)
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
	else
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;

	return 0;
}
static int
i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_mpls_pattern(dev, pattern,
					   error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: QINQ.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
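/* For illustration: a QinQ pattern is ETH / VLAN / VLAN, where the first
 * VLAN item gives the outer tag and the second the inner tag; both items
 * must supply a spec and a full 0xFFFF TCI mask.
 */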
static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_vlan *vlan_spec = NULL;
	const struct rte_flow_item_vlan *vlan_mask = NULL;
	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
	const struct rte_flow_item_vlan *o_vlan_mask = NULL;

	enum rte_flow_item_type item_type;
	bool vlan_flag = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;

			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (!vlan_flag) {
				/* First VLAN item is the outer tag */
				o_vlan_spec = vlan_spec;
				o_vlan_mask = vlan_mask;
				vlan_flag = 1;
			} else {
				/* Second VLAN item is the inner tag */
				i_vlan_spec = vlan_spec;
				i_vlan_mask = vlan_mask;
				vlan_flag = 0;
			}
			break;
		default:
			break;
		}
	}
	/* Get filter specification */
	if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
	    (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
			& I40E_TCI_MASK;
		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
			& I40E_TCI_MASK;
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}

	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	return 0;
}
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
					   error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}
static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* non-void item number of pattern */
	uint32_t i = 0;
	int ret;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;	/* reserve one entry for the END item */

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* Find if there's matched parse filter function */
	parse_filter = i40e_find_parse_filter_func(items);
	if (!parse_filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern, "Unsupported pattern");
		rte_free(items);
		return -rte_errno;
	}

	ret = parse_filter(dev, attr, items, actions, error, &cons_filter);

	rte_free(items);

	return ret;
}
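/* Creation consumes the file-scope cons_filter/cons_filter_type that the
 * validate step (via the matched parse function) has just filled in. The
 * new software node is appended to the tail of the per-type filter list,
 * so TAILQ_LAST() below retrieves the rule that was just programmed.
 */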
static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow;
	int ret;

	flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0) {
		rte_free(flow);
		return NULL;
	}

	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
					       &cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
			    &cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	default:
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
static int
i40e_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_filter_type filter_type = flow->filter_type;
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_flow_destroy_ethertype_filter(pf,
			 (struct i40e_ethertype_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_flow_destroy_tunnel_filter(pf,
			      (struct i40e_tunnel_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
			&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");

	return ret;
}
static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				    filter->input.mac_addr.addr_bytes,
				    filter->input.ether_type,
				    flags, pf->main_vsi->seid,
				    filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);

	return ret;
}
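/* Removing a tunnel/cloud filter mirrors the add path: MPLSoUDP, MPLSoGRE
 * and custom QinQ filters appear to have been programmed through the
 * extended ("big buffer") admin-queue command, so their removal uses the
 * big-buffer variant as well; all other cloud filters use the regular
 * filter element.
 */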
static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	if (!filter->is_to_vf)
		vsi = pf->main_vsi;
	else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);

	return ret;
}
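/* Flush all flows on the port: flow director, ethertype and tunnel filters
 * are flushed in turn, and the first failure aborts the sequence with an
 * rte_flow error describing which stage went wrong.
 */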
static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	ret = i40e_flow_flush_fdir_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush FDIR flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_ethertype_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush ethertype flows.");
		return -rte_errno;
	}

	ret = i40e_flow_flush_tunnel_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush tunnel flows.");
		return -rte_errno;
	}

	return ret;
}
/* Flush all flow director filters */
static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct rte_flow *flow;
	void *temp;
	int ret;

	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}

		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}

	return ret;
}
/* Flush all ethertype filters */
static int
i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
{
	struct i40e_ethertype_filter_list
		*ethertype_list = &pf->ethertype.ethertype_list;
	struct i40e_ethertype_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(ethertype_list))) {
		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete ethertype flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}
/* Flush all tunnel filters */
static int
i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(tunnel_list))) {
		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
		if (ret)
			return ret;
	}

	/* Delete tunnel flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}

	return ret;
}