/*-
 *   BSD LICENSE
 *
 *   Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <stdarg.h>

#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_log.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_eth_ctrl.h>
#include <rte_tailq.h>
#include <rte_flow_driver.h>

#include "i40e_logs.h"
#include "base/i40e_type.h"
#include "base/i40e_prototype.h"
#include "i40e_ethdev.h"
#define I40E_IPV4_TC_SHIFT	4
#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER	44
#define I40E_TENANT_ARRAY_NUM	3
#define I40E_TCI_MASK		0xFFFF
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				    const struct rte_flow_action *actions,
				    struct rte_flow_error *error,
				    struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
					const struct rte_flow_item *pattern,
					struct rte_flow_error *error,
					struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				       const struct rte_flow_action *actions,
				       struct rte_flow_error *error,
				       struct rte_eth_fdir_filter *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item pattern[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error,
				    union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
					const struct rte_flow_attr *attr,
					const struct rte_flow_item pattern[],
					const struct rte_flow_action actions[],
					struct rte_flow_error *error,
					union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				       const struct rte_flow_attr *attr,
				       const struct rte_flow_item pattern[],
				       const struct rte_flow_action actions[],
				       struct rte_flow_error *error,
				       union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				      struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
					   struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter);
static int
i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter);
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
};

union i40e_filter_t cons_filter;
enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
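
/*
 * Note: cons_filter/cons_filter_type above form a handshake between
 * validate and create: i40e_flow_validate() fills them through the
 * per-pattern parse functions, and i40e_flow_create() then consumes
 * them, so a rule does not have to be parsed twice.
 */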
/* Pattern matched ethertype filter */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched flow director filter */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* Pattern matched tunnel filter */
static enum rte_flow_item_type pattern_vxlan_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_vxlan_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_VXLAN,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_nvgre_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_NVGRE,
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_2[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_mpls_4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_GRE,
	RTE_FLOW_ITEM_TYPE_MPLS,
	RTE_FLOW_ITEM_TYPE_END,
};

static enum rte_flow_item_type pattern_qinq_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};
static struct i40e_valid_pattern i40e_supported_patterns[] = {
	/* Ethertype */
	{ pattern_ethertype, i40e_flow_parse_ethertype_filter },
	/* FDIR - support default flow type without flexible payload */
	{ pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
	{ pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
	/* VXLAN */
	{ pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
	{ pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
	/* NVGRE */
	{ pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
	{ pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
	/* MPLSoUDP & MPLSoGRE */
	{ pattern_mpls_1, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_2, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_3, i40e_flow_parse_mpls_filter },
	{ pattern_mpls_4, i40e_flow_parse_mpls_filter },
	/* QINQ */
	{ pattern_qinq_1, i40e_flow_parse_qinq_filter },
};
#define NEXT_ITEM_OF_ACTION(act, actions, index)			\
	do {								\
		act = actions + index;					\
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {	\
			index++;					\
			act = actions + index;				\
		}							\
	} while (0)
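
/*
 * For example, given actions = { VOID, QUEUE, END } and index = 0,
 * NEXT_ITEM_OF_ACTION leaves act pointing at the QUEUE action with
 * index == 1; a subsequent index++ plus another invocation then lands
 * on END.  This is the idiom used by all action parsers below.
 */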
/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}
/* Skip all VOID items of the pattern */
static void
i40e_pattern_skip_void_item(struct rte_flow_item *items,
			    const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = i40e_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = i40e_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);

		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
			pb = pe;
			break;
		}

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}
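
/*
 * For example, the pattern { VOID, ETH, VOID, IPV4, END } is compacted
 * into { ETH, IPV4, END }, which can then be compared directly against
 * the item type arrays in i40e_supported_patterns.
 */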
/* Check if the pattern matches a supported item type array */
static bool
i40e_match_pattern(enum rte_flow_item_type *item_array,
		   struct rte_flow_item *pattern)
{
	struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}
/* Find the parse filter function that matches the pattern */
static parse_filter_t
i40e_find_parse_filter_func(struct rte_flow_item *pattern)
{
	parse_filter_t parse_filter = NULL;
	uint8_t i = 0;

	for (; i < RTE_DIM(i40e_supported_patterns); i++) {
		if (i40e_match_pattern(i40e_supported_patterns[i].items,
				       pattern)) {
			parse_filter =
				i40e_supported_patterns[i].parse_filter;
			break;
		}
	}

	return parse_filter;
}
/* Parse attributes */
static int
i40e_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}
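
/*
 * In other words, the only attribute combination accepted here is
 * { .group = 0, .priority = 0, .ingress = 1, .egress = 0 }, matching
 * e.g. "flow create 0 ingress ..." in testpmd.
 */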
static uint16_t
i40e_get_outer_vlan(struct rte_eth_dev *dev)
{
	struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
	uint64_t reg_r = 0;
	uint16_t reg_id;
	uint16_t tpid;

	if (qinq)
		reg_id = 2;
	else
		reg_id = 3;

	i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
				    &reg_r, NULL);

	tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;

	return tpid;
}
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
 *    FF:FF:FF:FF:FF:FF.
 * 5. Ether_type mask should be 0xFFFF.
 */
static int
i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
				  const struct rte_flow_item *pattern,
				  struct rte_flow_error *error,
				  struct rte_eth_ethertype_filter *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	enum rte_flow_item_type item_type;
	uint16_t outer_tpid;

	outer_tpid = i40e_get_outer_vlan(dev);

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Get the MAC info. */
			if (!eth_spec || !eth_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL ETH spec/mask");
				return -rte_errno;
			}

			/* Mask bits of source MAC address must be full of 0.
			 * Mask bits of destination MAC address must be full
			 * of 1 or full of 0.
			 */
			if (!is_zero_ether_addr(&eth_mask->src) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MAC_addr mask");
				return -rte_errno;
			}

			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ethertype mask");
				return -rte_errno;
			}

			/* If mask bits of destination MAC address
			 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
			 */
			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				filter->mac_addr = eth_spec->dst;
				filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
			} else {
				filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
			}
			filter->ether_type = rte_be_to_cpu_16(eth_spec->type);

			if (filter->ether_type == ETHER_TYPE_IPv4 ||
			    filter->ether_type == ETHER_TYPE_IPv6 ||
			    filter->ether_type == ETHER_TYPE_LLDP ||
			    filter->ether_type == outer_tpid) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unsupported ether_type in control packet filter.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}
/* Ethertype action only supports QUEUE or DROP. */
static int
i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				 const struct rte_flow_action *actions,
				 struct rte_flow_error *error,
				 struct rte_eth_ethertype_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
	    act->type != RTE_FLOW_ACTION_TYPE_DROP) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue = act_q->index;
		if (filter->queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for ethertype_filter.");
			return -rte_errno;
		}
	} else {
		filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static int
i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				 const struct rte_flow_attr *attr,
				 const struct rte_flow_item pattern[],
				 const struct rte_flow_action actions[],
				 struct rte_flow_error *error,
				 union i40e_filter_t *filter)
{
	struct rte_eth_ethertype_filter *ethertype_filter =
		&filter->ethertype_filter;
	int ret;

	ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
						ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_ethertype_action(dev, actions, error,
					       ethertype_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;

	return ret;
}
static int
i40e_flow_check_raw_item(const struct rte_flow_item *item,
			 const struct rte_flow_item_raw *raw_spec,
			 struct rte_flow_error *error)
{
	if (!raw_spec->relative) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Relative should be 1.");
		return -rte_errno;
	}

	if (raw_spec->offset % sizeof(uint16_t)) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be even.");
		return -rte_errno;
	}

	if (raw_spec->search || raw_spec->limit) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "search or limit is not supported.");
		return -rte_errno;
	}

	if (raw_spec->offset < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Offset should be non-negative.");
		return -rte_errno;
	}
	return 0;
}
static int
i40e_flow_store_flex_pit(struct i40e_pf *pf,
			 struct i40e_fdir_flex_pit *flex_pit,
			 enum i40e_flxpld_layer_idx layer_idx,
			 uint8_t raw_id)
{
	uint8_t field_idx;

	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
	/* Check if the configuration is conflicted */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
		return -1;

	/* Check if the configuration exists. */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
		return 1;

	pf->fdir.flex_set[field_idx].src_offset =
		flex_pit->src_offset;
	pf->fdir.flex_set[field_idx].size =
		flex_pit->size;
	pf->fdir.flex_set[field_idx].dst_offset =
		flex_pit->dst_offset;

	return 0;
}
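
/*
 * i40e_flow_store_flex_pit() return convention: -1 on conflict with a
 * previously stored layout, 1 if an identical layout already exists
 * (nothing new to program), 0 if the layout was newly stored and still
 * has to be written to hardware by i40e_flow_set_fdir_flex_pit().
 */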
static int
i40e_flow_store_flex_mask(struct i40e_pf *pf,
			  enum i40e_filter_pctype pctype,
			  uint8_t *mask)
{
	struct i40e_fdir_flex_mask flex_mask;
	uint16_t mask_tmp;
	uint8_t i, nb_bitmask = 0;

	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
		if (mask_tmp) {
			flex_mask.word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
			if (mask_tmp != UINT16_MAX) {
				flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
				flex_mask.bitmask[nb_bitmask].offset =
					i / sizeof(uint16_t);
				nb_bitmask++;
				if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
					return -1;
			}
		}
	}
	flex_mask.nb_bitmask = nb_bitmask;

	if (pf->fdir.flex_mask_flag[pctype] &&
	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
		    sizeof(struct i40e_fdir_flex_mask))))
		return -2;
	else if (pf->fdir.flex_mask_flag[pctype] &&
		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
			  sizeof(struct i40e_fdir_flex_mask))))
		return 1;

	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
	       sizeof(struct i40e_fdir_flex_mask));
	return 0;
}
static void
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
			    enum i40e_flxpld_layer_idx layer_idx,
			    uint8_t raw_id)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t flx_pit;
	uint8_t field_idx;
	uint16_t min_next_off = 0;  /* in words */
	uint8_t i;

	/* Set flex pit */
	for (i = 0; i < raw_id; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				     pf->fdir.flex_set[field_idx].size,
				     pf->fdir.flex_set[field_idx].dst_offset);

		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
			pf->fdir.flex_set[field_idx].size;
	}

	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		min_next_off++;
	}

	pf->fdir.flex_pit_flag[layer_idx] = 1;
}
static void
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
			    enum i40e_filter_pctype pctype)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	uint8_t i;

	/* Set flex mask */
	flex_mask = &pf->fdir.flex_mask[pctype];
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);

	for (i = 0; i < flex_mask->nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			I40E_PRTQF_FD_MSK_MASK_MASK;
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	}

	pf->fdir.flex_mask_flag[pctype] = 1;
}
static int
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
			 enum i40e_filter_pctype pctype,
			 uint64_t input_set)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t inset_reg = 0;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	int i, num;

	/* Check if the input set is valid */
	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
				    input_set) != 0) {
		PMD_DRV_LOG(ERR, "Invalid input set");
		return -EINVAL;
	}

	/* Check if the configuration is conflicted */
	if (pf->fdir.inset_flag[pctype] &&
	    memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return -1;

	if (pf->fdir.inset_flag[pctype] &&
	    !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
		return 0;

	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	if (num < 0)
		return -EINVAL;

	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);

	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
			     (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
			     (uint32_t)((inset_reg >>
					 I40E_32_BIT_WIDTH) & UINT32_MAX));

	for (i = 0; i < num; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
				     mask_reg[i]);

	/* Clear unused mask registers of the pctype */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
	I40E_WRITE_FLUSH(hw);

	pf->fdir.input_set[pctype] = input_set;
	pf->fdir.inset_flag[pctype] = 1;
	return 0;
}
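
/*
 * Callers distinguish the error codes above: -1 signals a conflict with
 * the input set already programmed by the first rule of this pctype,
 * while -EINVAL signals an input set the hardware cannot support at all.
 */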
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported patterns: refer to array i40e_supported_patterns.
 * 3. Supported flow type and input set: refer to array
 *    valid_fdir_inset_table in i40e_ethdev.c.
 * 4. Mask of fields which need to be matched should be
 *    filled with 1.
 * 5. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
	const struct rte_flow_item_raw *raw_spec, *raw_mask;
	const struct rte_flow_item_vf *vf_spec;

	uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
	enum i40e_filter_pctype pctype;
	uint64_t input_set = I40E_INSET_NONE;
	uint16_t frag_off;
	enum rte_flow_item_type item_type;
	enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
	uint32_t i, j;
	uint8_t ipv6_addr_mask[16] = {
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
		0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
	enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
	uint8_t raw_id = 0;
	int32_t off_arr[I40E_MAX_FLXPLD_FIED];
	uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
	struct i40e_fdir_flex_pit flex_pit;
	uint8_t next_dst_off = 0;
	uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
	uint16_t flex_size;
	bool cfg_flex_pit = true;
	bool cfg_flex_msk = true;
	uint16_t outer_tpid;
	uint16_t ether_type;
	int ret;

	memset(off_arr, 0, sizeof(off_arr));
	memset(len_arr, 0, sizeof(len_arr));
	memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
	outer_tpid = i40e_get_outer_vlan(dev);
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			if (eth_spec && eth_mask) {
				if (!is_zero_ether_addr(&eth_mask->src) ||
				    !is_zero_ether_addr(&eth_mask->dst)) {
					rte_flow_error_set(error, EINVAL,
						      RTE_FLOW_ERROR_TYPE_ITEM,
						      item,
						      "Invalid MAC_addr mask.");
					return -rte_errno;
				}

				if ((eth_mask->type & UINT16_MAX) ==
				    UINT16_MAX) {
					input_set |= I40E_INSET_LAST_ETHER_TYPE;
					filter->input.flow.l2_flow.ether_type =
						eth_spec->type;
				}

				ether_type = rte_be_to_cpu_16(eth_spec->type);
				if (ether_type == ETHER_TYPE_IPv4 ||
				    ether_type == ETHER_TYPE_IPv6 ||
				    ether_type == ETHER_TYPE_ARP ||
				    ether_type == outer_tpid) {
					rte_flow_error_set(error, EINVAL,
						     RTE_FLOW_ERROR_TYPE_ITEM,
						     item,
						     "Unsupported ether_type.");
					return -rte_errno;
				}
			}

			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
			layer_idx = I40E_FLXPLD_L2_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK)) {
					input_set |= I40E_INSET_VLAN_INNER;
					filter->input.flow_ext.vlan_tci =
						vlan_spec->tci;
				}
			}

			flow_type = RTE_ETH_FLOW_L2_PAYLOAD;
			layer_idx = I40E_FLXPLD_L2_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			l3 = RTE_FLOW_ITEM_TYPE_IPV4;
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;

			if (ipv4_spec && ipv4_mask) {
				/* Check IPv4 mask and update input set */
				if (ipv4_mask->hdr.version_ihl ||
				    ipv4_mask->hdr.total_length ||
				    ipv4_mask->hdr.packet_id ||
				    ipv4_mask->hdr.fragment_offset ||
				    ipv4_mask->hdr.hdr_checksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
					return -rte_errno;
				}

				if (ipv4_mask->hdr.src_addr == UINT32_MAX)
					input_set |= I40E_INSET_IPV4_SRC;
				if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
					input_set |= I40E_INSET_IPV4_DST;
				if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
					input_set |= I40E_INSET_IPV4_TOS;
				if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
					input_set |= I40E_INSET_IPV4_TTL;
				if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
					input_set |= I40E_INSET_IPV4_PROTO;

				/* Get filter info */
				flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
				/* Check if it is fragment. */
				frag_off = ipv4_spec->hdr.fragment_offset;
				frag_off = rte_be_to_cpu_16(frag_off);
				if (frag_off & IPV4_HDR_OFFSET_MASK ||
				    frag_off & IPV4_HDR_MF_FLAG)
					flow_type = RTE_ETH_FLOW_FRAG_IPV4;

				/* Get the filter info */
				filter->input.flow.ip4_flow.proto =
					ipv4_spec->hdr.next_proto_id;
				filter->input.flow.ip4_flow.tos =
					ipv4_spec->hdr.type_of_service;
				filter->input.flow.ip4_flow.ttl =
					ipv4_spec->hdr.time_to_live;
				filter->input.flow.ip4_flow.src_ip =
					ipv4_spec->hdr.src_addr;
				filter->input.flow.ip4_flow.dst_ip =
					ipv4_spec->hdr.dst_addr;
			}

			layer_idx = I40E_FLXPLD_L3_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			l3 = RTE_FLOW_ITEM_TYPE_IPV6;
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;

			if (ipv6_spec && ipv6_mask) {
				/* Check IPv6 mask and update input set */
				if (ipv6_mask->hdr.payload_len) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask");
					return -rte_errno;
				}

				if (!memcmp(ipv6_mask->hdr.src_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.src_addr)))
					input_set |= I40E_INSET_IPV6_SRC;
				if (!memcmp(ipv6_mask->hdr.dst_addr,
					    ipv6_addr_mask,
					    RTE_DIM(ipv6_mask->hdr.dst_addr)))
					input_set |= I40E_INSET_IPV6_DST;

				if ((ipv6_mask->hdr.vtc_flow &
				     rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
				    == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
					input_set |= I40E_INSET_IPV6_TC;
				if (ipv6_mask->hdr.proto == UINT8_MAX)
					input_set |= I40E_INSET_IPV6_NEXT_HDR;
				if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
					input_set |= I40E_INSET_IPV6_HOP_LIMIT;

				/* Get filter info */
				filter->input.flow.ipv6_flow.tc =
					(uint8_t)(ipv6_spec->hdr.vtc_flow <<
						  I40E_IPV4_TC_SHIFT);
				filter->input.flow.ipv6_flow.proto =
					ipv6_spec->hdr.proto;
				filter->input.flow.ipv6_flow.hop_limits =
					ipv6_spec->hdr.hop_limits;

				rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
					   ipv6_spec->hdr.src_addr, 16);
				rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
					   ipv6_spec->hdr.dst_addr, 16);

				/* Check if it is fragment. */
				if (ipv6_spec->hdr.proto ==
				    I40E_IPV6_FRAG_HEADER)
					flow_type =
						RTE_ETH_FLOW_FRAG_IPV6;
				else
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
			}

			layer_idx = I40E_FLXPLD_L3_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			if (tcp_spec && tcp_mask) {
				/* Check TCP mask and update input set */
				if (tcp_mask->hdr.sent_seq ||
				    tcp_mask->hdr.recv_ack ||
				    tcp_mask->hdr.data_off ||
				    tcp_mask->hdr.tcp_flags ||
				    tcp_mask->hdr.rx_win ||
				    tcp_mask->hdr.cksum ||
				    tcp_mask->hdr.tcp_urp) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
					return -rte_errno;
				}

				if (tcp_mask->hdr.src_port == UINT16_MAX)
					input_set |= I40E_INSET_SRC_PORT;
				if (tcp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= I40E_INSET_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV6_TCP;

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.flow.tcp4_flow.src_port =
						tcp_spec->hdr.src_port;
					filter->input.flow.tcp4_flow.dst_port =
						tcp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.flow.tcp6_flow.src_port =
						tcp_spec->hdr.src_port;
					filter->input.flow.tcp6_flow.dst_port =
						tcp_spec->hdr.dst_port;
				}
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			if (udp_spec && udp_mask) {
				/* Check UDP mask and update input set */
				if (udp_mask->hdr.dgram_len ||
				    udp_mask->hdr.dgram_cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
					return -rte_errno;
				}

				if (udp_mask->hdr.src_port == UINT16_MAX)
					input_set |= I40E_INSET_SRC_PORT;
				if (udp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= I40E_INSET_DST_PORT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV6_UDP;

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.flow.udp4_flow.src_port =
						udp_spec->hdr.src_port;
					filter->input.flow.udp4_flow.dst_port =
						udp_spec->hdr.dst_port;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.flow.udp6_flow.src_port =
						udp_spec->hdr.src_port;
					filter->input.flow.udp6_flow.dst_port =
						udp_spec->hdr.dst_port;
				}
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_SCTP:
			sctp_spec =
				(const struct rte_flow_item_sctp *)item->spec;
			sctp_mask =
				(const struct rte_flow_item_sctp *)item->mask;

			if (sctp_spec && sctp_mask) {
				/* Check SCTP mask and update input set */
				if (sctp_mask->hdr.cksum) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid SCTP mask");
					return -rte_errno;
				}

				if (sctp_mask->hdr.src_port == UINT16_MAX)
					input_set |= I40E_INSET_SRC_PORT;
				if (sctp_mask->hdr.dst_port == UINT16_MAX)
					input_set |= I40E_INSET_DST_PORT;
				if (sctp_mask->hdr.tag == UINT32_MAX)
					input_set |= I40E_INSET_SCTP_VT;

				/* Get filter info */
				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
				else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
					flow_type =
						RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;

				if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
					filter->input.flow.sctp4_flow.src_port =
						sctp_spec->hdr.src_port;
					filter->input.flow.sctp4_flow.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.flow.sctp4_flow.verify_tag
						= sctp_spec->hdr.tag;
				} else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
					filter->input.flow.sctp6_flow.src_port =
						sctp_spec->hdr.src_port;
					filter->input.flow.sctp6_flow.dst_port =
						sctp_spec->hdr.dst_port;
					filter->input.flow.sctp6_flow.verify_tag
						= sctp_spec->hdr.tag;
				}
			}

			layer_idx = I40E_FLXPLD_L4_IDX;

			break;
		case RTE_FLOW_ITEM_TYPE_RAW:
			raw_spec = (const struct rte_flow_item_raw *)item->spec;
			raw_mask = (const struct rte_flow_item_raw *)item->mask;

			if (!raw_spec || !raw_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "NULL RAW spec/mask");
				return -rte_errno;
			}

			ret = i40e_flow_check_raw_item(item, raw_spec, error);
			if (ret < 0)
				return ret;

			off_arr[raw_id] = raw_spec->offset;
			len_arr[raw_id] = raw_spec->length;

			flex_size = 0;
			memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
			flex_pit.size =
				raw_spec->length / sizeof(uint16_t);
			flex_pit.dst_offset =
				next_dst_off / sizeof(uint16_t);

			for (i = 0; i <= raw_id; i++) {
				if (i == raw_id)
					flex_pit.src_offset +=
						raw_spec->offset /
						sizeof(uint16_t);
				else
					flex_pit.src_offset +=
						(off_arr[i] + len_arr[i]) /
						sizeof(uint16_t);
				flex_size += len_arr[i];
			}
			if (((flex_pit.src_offset + flex_pit.size) >=
			     I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
			    flex_size > I40E_FDIR_MAX_FLEXLEN) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Exceeds maximal payload limit.");
				return -rte_errno;
			}

			/* Store flex pit to SW */
			ret = i40e_flow_store_flex_pit(pf, &flex_pit,
						       layer_idx, raw_id);
			if (ret < 0) {
				rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Conflict with the first flexible rule.");
				return -rte_errno;
			} else if (ret > 0)
				cfg_flex_pit = false;

			for (i = 0; i < raw_spec->length; i++) {
				j = i + next_dst_off;
				filter->input.flow_ext.flexbytes[j] =
					raw_spec->pattern[i];
				flex_mask[j] = raw_mask->pattern[i];
			}

			next_dst_off += raw_spec->length;
			raw_id++;
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = (const struct rte_flow_item_vf *)item->spec;
			filter->input.flow_ext.is_vf = 1;
			filter->input.flow_ext.dst_id = vf_spec->id;
			if (filter->input.flow_ext.is_vf &&
			    filter->input.flow_ext.dst_id >= pf->vf_num) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VF ID for FDIR.");
				return -rte_errno;
			}
			break;
		default:
			break;
		}
	}

	pctype = i40e_flowtype_to_pctype(flow_type);
	if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Unsupported flow type");
		return -rte_errno;
	}

	ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
	if (ret == -1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Conflict with the first rule's input set.");
		return -rte_errno;
	} else if (ret == -EINVAL) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, item,
				   "Invalid pattern mask.");
		return -rte_errno;
	}

	filter->input.flow_type = flow_type;

	/* Store flex mask to SW */
	ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
	if (ret == -1) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Exceed maximal number of bitmasks");
		return -rte_errno;
	} else if (ret == -2) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   item,
				   "Conflict with the first flexible rule");
		return -rte_errno;
	} else if (ret > 0)
		cfg_flex_msk = false;

	if (cfg_flex_pit)
		i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);

	if (cfg_flex_msk)
		i40e_flow_set_fdir_flex_msk(pf, pctype);

	return 0;
}
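
/*
 * Illustrative testpmd rule exercising this parser (full masks on the
 * matched fields, as required above):
 *   flow create 0 ingress pattern eth / ipv4 src is 192.168.0.1
 *        dst is 192.168.0.2 / udp src is 32 dst is 33 / end
 *        actions queue index 1 / mark id 3 / end
 */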
/* Parse to get the action info of a FDIR filter.
 * FDIR action supports QUEUE or (QUEUE + MARK).
 */
static int
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct rte_eth_fdir_filter *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec;
	uint32_t index = 0;

	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->action.rx_queue = act_q->index;
		if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID for FDIR.");
			return -rte_errno;
		}
		filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		filter->action.behavior = RTE_ETH_FDIR_REJECT;
		break;
	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
		filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is MARK or FLAG or END. */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_MARK:
		mark_spec = (const struct rte_flow_action_mark *)act->conf;
		filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
		filter->soft_id = mark_spec->id;
		break;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
		break;
	case RTE_FLOW_ACTION_TYPE_END:
		return 0;
	default:
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		return -rte_errno;
	}

	return 0;
}
static int
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct rte_eth_fdir_filter *fdir_filter =
		&filter->fdir_filter;
	int ret;

	ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_FDIR;

	if (dev->data->dev_conf.fdir_conf.mode !=
	    RTE_FDIR_MODE_PERFECT) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL,
				   "Check the mode in fdir_conf.");
		return -rte_errno;
	}

	return 0;
}
/* Parse to get the action info of a tunnel filter.
 * Tunnel action only supports PF, VF and QUEUE.
 */
static int
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
			      const struct rte_flow_action *actions,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	uint32_t index = 0;

	/* Check if the first non-void action is PF or VF. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->vf_id = act_vf->id;
		filter->is_to_vf = 1;
		if (filter->vf_id >= pf->vf_num) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid VF ID for tunnel filter");
			return -rte_errno;
		}
	}

	/* Check if the next non-void item is QUEUE */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue_id = act_q->index;
		if ((!filter->is_to_vf) &&
		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for tunnel filter");
			return -rte_errno;
		} else if (filter->is_to_vf &&
			   (filter->queue_id >= pf->vf_nb_qps)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for tunnel filter");
			return -rte_errno;
		}
	}

	/* Check if the next non-void item is END */
	index++;
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
		return -rte_errno;
	}

	return 0;
}
static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
};

static int
i40e_check_tunnel_filter_type(uint8_t filter_type)
{
	uint8_t i;

	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
		if (filter_type == i40e_supported_tunnel_filter_types[i])
			return 0;
	}

	return -1;
}
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				if (!vxlan_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
						rte_be_to_cpu_16(vlan_spec->tci) &
						I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				is_vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			vxlan_flag = 1;
			break;
		default:
			break;
		}
	}

	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;

	return 0;
}
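
/*
 * Illustrative testpmd rule matching an inner MAC plus VNI
 * (IMAC + TENID), redirected to a PF queue:
 *   flow create 0 ingress pattern eth / ipv4 / udp / vxlan vni is 8 /
 *        eth dst is 00:11:22:33:44:55 / end actions pf / queue index 2 /
 *        end
 */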
static int
i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
					    error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}
/* 1. Last in item should be NULL as range is not supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be
 *    filled with 1.
 * 4. Mask of fields which needn't to be matched should be
 *    filled with 0.
 */
static int
i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	enum rte_flow_item_type item_type;
	uint8_t filter_type = 0;
	bool is_tni_masked = 0;
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	bool nvgre_flag = 0;
	uint32_t tenant_id_be = 0;
	int ret;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "Not support range");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
				return -rte_errno;
			}

			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
				    eth_mask->type) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid ether spec/mask");
					return -rte_errno;
				}

				if (!nvgre_flag) {
					rte_memcpy(&filter->outer_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
				} else {
					rte_memcpy(&filter->inner_mac,
						   &eth_spec->dst,
						   ETHER_ADDR_LEN);
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
				}
			}

			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid vlan item");
				return -rte_errno;
			}

			if (vlan_spec && vlan_mask) {
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
						rte_be_to_cpu_16(vlan_spec->tci) &
						I40E_TCI_MASK;
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				is_tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (is_tni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->tenant_id =
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
			}

			nvgre_flag = 1;
			break;
		default:
			break;
		}
	}

	ret = i40e_check_tunnel_filter_type(filter_type);
	if (ret < 0) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   NULL,
				   "Invalid filter type");
		return -rte_errno;
	}
	filter->filter_type = filter_type;

	filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;

	return 0;
}
static int
i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error,
			     union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
					    error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;

	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;

	cons_filter_type = RTE_ETH_FILTER_TUNNEL;

	return ret;
}
2159 /* 1. Last in item should be NULL as range is not supported.
2160 * 2. Supported filter types: MPLS label.
2161 * 3. Mask of fields which need to be matched should be
2163 * 4. Mask of fields which needn't to be matched should be
static int
i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_mpls *mpls_spec;
	const struct rte_flow_item_mpls *mpls_mask;
	enum rte_flow_item_type item_type;
	bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
	const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
	uint32_t label_be = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv4 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid IPv6 item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid UDP item");
				return -rte_errno;
			}
			is_mplsoudp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
			/* GRE is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid GRE item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			mpls_spec =
				(const struct rte_flow_item_mpls *)item->spec;
			mpls_mask =
				(const struct rte_flow_item_mpls *)item->mask;

			if (!mpls_spec || !mpls_mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid MPLS item");
				return -rte_errno;
			}

			if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid MPLS label mask");
				return -rte_errno;
			}
			rte_memcpy(((uint8_t *)&label_be + 1),
				   mpls_spec->label_tc_s, 3);
			filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
			break;
		default:
			break;
		}
	}

	if (is_mplsoudp)
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
	else
		filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;

	return 0;
}
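
/*
 * Illustrative sketch (not part of the driver): label_tc_s packs the
 * 20-bit MPLS label, 3-bit TC and 1-bit S flag into three bytes, so the
 * label occupies the upper 20 bits and label_mask {0xFF, 0xFF, 0xF0}
 * covers exactly the label. For an example label of 0x12345:
 *
 *	uint32_t label_be = 0;
 *	const uint8_t label_tc_s[3] = {0x12, 0x34, 0x50};	// label << 4
 *
 *	rte_memcpy((uint8_t *)&label_be + 1, label_tc_s, 3);
 *	label = rte_be_to_cpu_32(label_be) >> 4;	// == 0x12345
 */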

static int
i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_mpls_pattern(dev, pattern,
					   error, tunnel_filter);
	if (ret)
		return ret;
	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;
	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;
	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
	return ret;
}

/* 1. The last member of an item must be NULL, as ranges are not supported.
 * 2. Supported filter types: QINQ.
 * 3. The mask of a field which needs to be matched should be filled with 1.
 * 4. The mask of a field which need not be matched should be filled with 0.
 */
static int
i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
			     const struct rte_flow_item *pattern,
			     struct rte_flow_error *error,
			     struct i40e_tunnel_filter_conf *filter)
{
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_vlan *vlan_spec = NULL;
	const struct rte_flow_item_vlan *vlan_mask = NULL;
	const struct rte_flow_item_vlan *i_vlan_spec = NULL;
	const struct rte_flow_item_vlan *i_vlan_mask = NULL;
	const struct rte_flow_item_vlan *o_vlan_spec = NULL;
	const struct rte_flow_item_vlan *o_vlan_mask = NULL;
	enum rte_flow_item_type item_type;
	bool vlan_flag = 0;

	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->last) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM, item,
					   "Range not supported");
			return -rte_errno;
		}
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid ETH item");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item, "Invalid vlan item");
				return -rte_errno;
			}
			/* The first VLAN item is the outer one. */
			if (!vlan_flag) {
				o_vlan_spec = vlan_spec;
				o_vlan_mask = vlan_mask;
				vlan_flag = 1;
			} else {
				i_vlan_spec = vlan_spec;
				i_vlan_mask = vlan_mask;
				vlan_flag = 0;
			}
			break;
		default:
			break;
		}
	}

	/* Get filter specification */
	if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
	    (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
		filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
			& I40E_TCI_MASK;
		filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
			& I40E_TCI_MASK;
	} else {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM, NULL,
				   "Invalid filter type");
		return -rte_errno;
	}

	filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
	return 0;
}
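
/*
 * Illustrative sketch (not part of the driver): a QinQ pattern carries
 * two VLAN items, outer first, inner second, and both must mask the
 * whole TCI. The values below are arbitrary examples built on the public
 * rte_flow API:
 *
 *	struct rte_flow_item_vlan outer = { .tci = rte_cpu_to_be_16(100) };
 *	struct rte_flow_item_vlan inner = { .tci = rte_cpu_to_be_16(200) };
 *	struct rte_flow_item_vlan mask = { .tci = rte_cpu_to_be_16(0xFFFF) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &outer, .mask = &mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VLAN,
 *		  .spec = &inner, .mask = &mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */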

static int
i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
{
	struct i40e_tunnel_filter_conf *tunnel_filter =
		&filter->consistent_tunnel_filter;
	int ret;

	ret = i40e_flow_parse_qinq_pattern(dev, pattern,
					   error, tunnel_filter);
	if (ret)
		return ret;
	ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
	if (ret)
		return ret;
	ret = i40e_flow_parse_attr(attr, error);
	if (ret)
		return ret;
	cons_filter_type = RTE_ETH_FILTER_TUNNEL;
	return ret;
}

static int
i40e_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct rte_flow_item *items; /* internal pattern w/o VOID items */
	parse_filter_t parse_filter;
	uint32_t item_num = 0; /* number of non-void items in pattern */
	uint32_t i = 0;
	int ret;

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}
	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}
	if (!attr) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	memset(&cons_filter, 0, sizeof(cons_filter));

	/* Get the non-void item number of pattern, counting the END item */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;

	items = rte_zmalloc("i40e_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return -ENOMEM;
	}

	i40e_pattern_skip_void_item(items, pattern);

	/* Find if there's a matching parse filter function */
	parse_filter = i40e_find_parse_filter_func(items);
	if (!parse_filter) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   pattern, "Unsupported pattern");
		rte_free(items);
		return -rte_errno;
	}

	ret = parse_filter(dev, attr, items, actions, error, &cons_filter);

	rte_free(items);
	return ret;
}
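
/*
 * Illustrative sketch (not part of the driver): applications reach this
 * callback through the generic rte_flow API; VOID items are stripped from
 * the supplied pattern before the parser lookup runs:
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
 *		printf("rule rejected: %s\n",
 *		       err.message ? err.message : "(no message)");
 */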

static struct rte_flow *
i40e_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	struct rte_flow *flow;
	int ret;

	flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return NULL;
	}

	ret = i40e_flow_validate(dev, attr, pattern, actions, error);
	if (ret < 0) {
		rte_free(flow);
		return NULL;
	}

	switch (cons_filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_ethertype_filter_set(pf,
					&cons_filter.ethertype_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
					i40e_ethertype_filter_list);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
					       &cons_filter.fdir_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
					i40e_fdir_filter_list);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_dev_consistent_tunnel_filter_set(pf,
				&cons_filter.consistent_tunnel_filter, 1);
		if (ret)
			goto free_flow;
		flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
					i40e_tunnel_filter_list);
		break;
	default:
		ret = -EINVAL;
		goto free_flow;
	}

	flow->filter_type = cons_filter_type;
	TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
	return flow;

free_flow:
	rte_flow_error_set(error, -ret,
			   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
			   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}

static int
i40e_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	enum rte_filter_type filter_type = flow->filter_type;
	int ret = 0;

	switch (filter_type) {
	case RTE_ETH_FILTER_ETHERTYPE:
		ret = i40e_flow_destroy_ethertype_filter(pf,
			(struct i40e_ethertype_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_TUNNEL:
		ret = i40e_flow_destroy_tunnel_filter(pf,
			(struct i40e_tunnel_filter *)flow->rule);
		break;
	case RTE_ETH_FILTER_FDIR:
		ret = i40e_add_del_fdir_filter(dev,
			&((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
		break;
	default:
		PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
			    filter_type);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		TAILQ_REMOVE(&pf->flow_list, flow, node);
		rte_free(flow);
	} else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");

	return ret;
}

static int
i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				   struct i40e_ethertype_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
	struct i40e_ethertype_filter *node;
	struct i40e_control_filter_stats stats;
	uint16_t flags = 0;
	int ret = 0;

	if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
	if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
		flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
	flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;

	memset(&stats, 0, sizeof(stats));
	ret = i40e_aq_add_rem_control_packet_filter(hw,
				filter->input.mac_addr.addr_bytes,
				filter->input.ether_type,
				flags, pf->main_vsi->seid,
				filter->queue, 0, &stats, NULL);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_ethertype_filter_del(pf, &node->input);
	return ret;
}

static int
i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter)
{
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_vsi *vsi;
	struct i40e_pf_vf *vf;
	struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
	struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
	struct i40e_tunnel_filter *node;
	bool big_buffer = 0;
	int ret = 0;

	memset(&cld_filter, 0, sizeof(cld_filter));
	ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
			(struct ether_addr *)&cld_filter.element.outer_mac);
	ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
			(struct ether_addr *)&cld_filter.element.inner_mac);
	cld_filter.element.inner_vlan = filter->input.inner_vlan;
	cld_filter.element.flags = filter->input.flags;
	cld_filter.element.tenant_id = filter->input.tenant_id;
	cld_filter.element.queue_number = filter->queue;
	rte_memcpy(cld_filter.general_fields,
		   filter->input.general_fields,
		   sizeof(cld_filter.general_fields));

	if (!filter->is_to_vf) {
		vsi = pf->main_vsi;
	} else {
		vf = &pf->vfs[filter->vf_id];
		vsi = vf->vsi;
	}

	/* MPLSoUDP, MPLSoGRE and QinQ filters need the big-buffer variant */
	if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
	    I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
	    ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
	    I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
		big_buffer = 1;

	if (big_buffer)
		ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
							      &cld_filter, 1);
	else
		ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
						   &cld_filter.element, 1);
	if (ret < 0)
		return -ENOTSUP;

	node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
	if (!node)
		return -EINVAL;

	ret = i40e_sw_tunnel_filter_del(pf, &node->input);
	return ret;
}

static int
i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	int ret;

	ret = i40e_flow_flush_fdir_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush FDIR flows.");
		return -rte_errno;
	}
	ret = i40e_flow_flush_ethertype_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush ethertype flows.");
		return -rte_errno;
	}
	ret = i40e_flow_flush_tunnel_filter(pf);
	if (ret) {
		rte_flow_error_set(error, -ret, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to flush tunnel flows.");
		return -rte_errno;
	}
	return ret;
}

static int
i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
{
	struct rte_eth_dev *dev = pf->adapter->eth_dev;
	struct i40e_fdir_info *fdir_info = &pf->fdir;
	struct i40e_fdir_filter *fdir_filter;
	struct rte_flow *flow;
	void *temp;
	int ret;

	ret = i40e_fdir_flush(dev);
	if (!ret) {
		/* Delete FDIR filters in FDIR list. */
		while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
			ret = i40e_sw_fdir_filter_del(pf,
						      &fdir_filter->fdir.input);
			if (ret < 0)
				return ret;
		}
		/* Delete FDIR flows in flow list. */
		TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
			if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
				TAILQ_REMOVE(&pf->flow_list, flow, node);
				rte_free(flow);
			}
		}
	}
	return ret;
}
/* Flush all ethertype filters */
static int
i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
{
	struct i40e_ethertype_filter_list
		*ethertype_list = &pf->ethertype.ethertype_list;
	struct i40e_ethertype_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(ethertype_list))) {
		ret = i40e_flow_destroy_ethertype_filter(pf, filter);
		if (ret)
			return ret;
	}
	/* Delete ethertype flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}
	return ret;
}
/* Flush all tunnel filters */
static int
i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
{
	struct i40e_tunnel_filter_list
		*tunnel_list = &pf->tunnel.tunnel_list;
	struct i40e_tunnel_filter *filter;
	struct rte_flow *flow;
	void *temp;
	int ret = 0;

	while ((filter = TAILQ_FIRST(tunnel_list))) {
		ret = i40e_flow_destroy_tunnel_filter(pf, filter);
		if (ret)
			return ret;
	}
	/* Delete tunnel flows in flow list. */
	TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
		if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
			TAILQ_REMOVE(&pf->flow_list, flow, node);
			rte_free(flow);
		}
	}
	return ret;
}