4 * Copyright (c) 2016-2017 Intel Corporation. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
10 * * Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * * Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the
16 * * Neither the name of Intel Corporation nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
24 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
25 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
26 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
27 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
28 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
29 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
30 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/queue.h>
41 #include <rte_ether.h>
42 #include <rte_ethdev.h>
44 #include <rte_memzone.h>
45 #include <rte_malloc.h>
46 #include <rte_eth_ctrl.h>
47 #include <rte_tailq.h>
48 #include <rte_flow_driver.h>
50 #include "i40e_logs.h"
51 #include "base/i40e_type.h"
52 #include "base/i40e_prototype.h"
53 #include "i40e_ethdev.h"
/* Local constants used by the flow parsers below. */
#define I40E_IPV4_TC_SHIFT	4	/* shift to extract IPv6 TC from vtc_flow */
#define I40E_IPV6_TC_MASK	(0x00FF << I40E_IPV4_TC_SHIFT)
#define I40E_IPV6_FRAG_HEADER	44	/* IPv6 next-header value for fragments */
#define I40E_TENANT_ARRAY_NUM	3
#define I40E_TCI_MASK		0xFFFF
61 static int i40e_flow_validate(struct rte_eth_dev *dev,
62 const struct rte_flow_attr *attr,
63 const struct rte_flow_item pattern[],
64 const struct rte_flow_action actions[],
65 struct rte_flow_error *error);
66 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
67 const struct rte_flow_attr *attr,
68 const struct rte_flow_item pattern[],
69 const struct rte_flow_action actions[],
70 struct rte_flow_error *error);
71 static int i40e_flow_destroy(struct rte_eth_dev *dev,
72 struct rte_flow *flow,
73 struct rte_flow_error *error);
74 static int i40e_flow_flush(struct rte_eth_dev *dev,
75 struct rte_flow_error *error);
77 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
78 const struct rte_flow_item *pattern,
79 struct rte_flow_error *error,
80 struct rte_eth_ethertype_filter *filter);
81 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
82 const struct rte_flow_action *actions,
83 struct rte_flow_error *error,
84 struct rte_eth_ethertype_filter *filter);
85 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
86 const struct rte_flow_item *pattern,
87 struct rte_flow_error *error,
88 struct rte_eth_fdir_filter *filter);
89 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
90 const struct rte_flow_action *actions,
91 struct rte_flow_error *error,
92 struct rte_eth_fdir_filter *filter);
93 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
94 const struct rte_flow_action *actions,
95 struct rte_flow_error *error,
96 struct i40e_tunnel_filter_conf *filter);
97 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
98 struct rte_flow_error *error);
99 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
100 const struct rte_flow_attr *attr,
101 const struct rte_flow_item pattern[],
102 const struct rte_flow_action actions[],
103 struct rte_flow_error *error,
104 union i40e_filter_t *filter);
105 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
106 const struct rte_flow_attr *attr,
107 const struct rte_flow_item pattern[],
108 const struct rte_flow_action actions[],
109 struct rte_flow_error *error,
110 union i40e_filter_t *filter);
111 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
112 const struct rte_flow_attr *attr,
113 const struct rte_flow_item pattern[],
114 const struct rte_flow_action actions[],
115 struct rte_flow_error *error,
116 union i40e_filter_t *filter);
117 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
118 const struct rte_flow_attr *attr,
119 const struct rte_flow_item pattern[],
120 const struct rte_flow_action actions[],
121 struct rte_flow_error *error,
122 union i40e_filter_t *filter);
123 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
124 const struct rte_flow_attr *attr,
125 const struct rte_flow_item pattern[],
126 const struct rte_flow_action actions[],
127 struct rte_flow_error *error,
128 union i40e_filter_t *filter);
129 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
130 struct i40e_ethertype_filter *filter);
131 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
132 struct i40e_tunnel_filter *filter);
133 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
134 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
135 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
137 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
138 const struct rte_flow_attr *attr,
139 const struct rte_flow_item pattern[],
140 const struct rte_flow_action actions[],
141 struct rte_flow_error *error,
142 union i40e_filter_t *filter);
144 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
145 const struct rte_flow_item *pattern,
146 struct rte_flow_error *error,
147 struct i40e_tunnel_filter_conf *filter);
149 const struct rte_flow_ops i40e_flow_ops = {
150 .validate = i40e_flow_validate,
151 .create = i40e_flow_create,
152 .destroy = i40e_flow_destroy,
153 .flush = i40e_flow_flush,
156 union i40e_filter_t cons_filter;
157 enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
159 /* Pattern matched ethertype filter */
160 static enum rte_flow_item_type pattern_ethertype[] = {
161 RTE_FLOW_ITEM_TYPE_ETH,
162 RTE_FLOW_ITEM_TYPE_END,
165 /* Pattern matched flow director filter */
166 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
167 RTE_FLOW_ITEM_TYPE_IPV4,
168 RTE_FLOW_ITEM_TYPE_END,
171 static enum rte_flow_item_type pattern_fdir_ipv4_ext[] = {
172 RTE_FLOW_ITEM_TYPE_ETH,
173 RTE_FLOW_ITEM_TYPE_IPV4,
174 RTE_FLOW_ITEM_TYPE_END,
177 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
178 RTE_FLOW_ITEM_TYPE_IPV4,
179 RTE_FLOW_ITEM_TYPE_UDP,
180 RTE_FLOW_ITEM_TYPE_END,
183 static enum rte_flow_item_type pattern_fdir_ipv4_udp_ext[] = {
184 RTE_FLOW_ITEM_TYPE_ETH,
185 RTE_FLOW_ITEM_TYPE_IPV4,
186 RTE_FLOW_ITEM_TYPE_UDP,
187 RTE_FLOW_ITEM_TYPE_END,
190 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
191 RTE_FLOW_ITEM_TYPE_IPV4,
192 RTE_FLOW_ITEM_TYPE_TCP,
193 RTE_FLOW_ITEM_TYPE_END,
196 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_ext[] = {
197 RTE_FLOW_ITEM_TYPE_ETH,
198 RTE_FLOW_ITEM_TYPE_IPV4,
199 RTE_FLOW_ITEM_TYPE_TCP,
200 RTE_FLOW_ITEM_TYPE_END,
203 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
204 RTE_FLOW_ITEM_TYPE_IPV4,
205 RTE_FLOW_ITEM_TYPE_SCTP,
206 RTE_FLOW_ITEM_TYPE_END,
209 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_ext[] = {
210 RTE_FLOW_ITEM_TYPE_ETH,
211 RTE_FLOW_ITEM_TYPE_IPV4,
212 RTE_FLOW_ITEM_TYPE_SCTP,
213 RTE_FLOW_ITEM_TYPE_END,
216 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
217 RTE_FLOW_ITEM_TYPE_IPV6,
218 RTE_FLOW_ITEM_TYPE_END,
221 static enum rte_flow_item_type pattern_fdir_ipv6_ext[] = {
222 RTE_FLOW_ITEM_TYPE_ETH,
223 RTE_FLOW_ITEM_TYPE_IPV6,
224 RTE_FLOW_ITEM_TYPE_END,
227 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
228 RTE_FLOW_ITEM_TYPE_IPV6,
229 RTE_FLOW_ITEM_TYPE_UDP,
230 RTE_FLOW_ITEM_TYPE_END,
233 static enum rte_flow_item_type pattern_fdir_ipv6_udp_ext[] = {
234 RTE_FLOW_ITEM_TYPE_ETH,
235 RTE_FLOW_ITEM_TYPE_IPV6,
236 RTE_FLOW_ITEM_TYPE_UDP,
237 RTE_FLOW_ITEM_TYPE_END,
240 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
241 RTE_FLOW_ITEM_TYPE_IPV6,
242 RTE_FLOW_ITEM_TYPE_TCP,
243 RTE_FLOW_ITEM_TYPE_END,
246 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_ext[] = {
247 RTE_FLOW_ITEM_TYPE_ETH,
248 RTE_FLOW_ITEM_TYPE_IPV6,
249 RTE_FLOW_ITEM_TYPE_TCP,
250 RTE_FLOW_ITEM_TYPE_END,
253 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
254 RTE_FLOW_ITEM_TYPE_IPV6,
255 RTE_FLOW_ITEM_TYPE_SCTP,
256 RTE_FLOW_ITEM_TYPE_END,
259 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_ext[] = {
260 RTE_FLOW_ITEM_TYPE_ETH,
261 RTE_FLOW_ITEM_TYPE_IPV6,
262 RTE_FLOW_ITEM_TYPE_SCTP,
263 RTE_FLOW_ITEM_TYPE_END,
266 /* Pattern matched tunnel filter */
267 static enum rte_flow_item_type pattern_vxlan_1[] = {
268 RTE_FLOW_ITEM_TYPE_ETH,
269 RTE_FLOW_ITEM_TYPE_IPV4,
270 RTE_FLOW_ITEM_TYPE_UDP,
271 RTE_FLOW_ITEM_TYPE_VXLAN,
272 RTE_FLOW_ITEM_TYPE_ETH,
273 RTE_FLOW_ITEM_TYPE_END,
276 static enum rte_flow_item_type pattern_vxlan_2[] = {
277 RTE_FLOW_ITEM_TYPE_ETH,
278 RTE_FLOW_ITEM_TYPE_IPV6,
279 RTE_FLOW_ITEM_TYPE_UDP,
280 RTE_FLOW_ITEM_TYPE_VXLAN,
281 RTE_FLOW_ITEM_TYPE_ETH,
282 RTE_FLOW_ITEM_TYPE_END,
285 static enum rte_flow_item_type pattern_vxlan_3[] = {
286 RTE_FLOW_ITEM_TYPE_ETH,
287 RTE_FLOW_ITEM_TYPE_IPV4,
288 RTE_FLOW_ITEM_TYPE_UDP,
289 RTE_FLOW_ITEM_TYPE_VXLAN,
290 RTE_FLOW_ITEM_TYPE_ETH,
291 RTE_FLOW_ITEM_TYPE_VLAN,
292 RTE_FLOW_ITEM_TYPE_END,
295 static enum rte_flow_item_type pattern_vxlan_4[] = {
296 RTE_FLOW_ITEM_TYPE_ETH,
297 RTE_FLOW_ITEM_TYPE_IPV6,
298 RTE_FLOW_ITEM_TYPE_UDP,
299 RTE_FLOW_ITEM_TYPE_VXLAN,
300 RTE_FLOW_ITEM_TYPE_ETH,
301 RTE_FLOW_ITEM_TYPE_VLAN,
302 RTE_FLOW_ITEM_TYPE_END,
305 static enum rte_flow_item_type pattern_nvgre_1[] = {
306 RTE_FLOW_ITEM_TYPE_ETH,
307 RTE_FLOW_ITEM_TYPE_IPV4,
308 RTE_FLOW_ITEM_TYPE_NVGRE,
309 RTE_FLOW_ITEM_TYPE_ETH,
310 RTE_FLOW_ITEM_TYPE_END,
313 static enum rte_flow_item_type pattern_nvgre_2[] = {
314 RTE_FLOW_ITEM_TYPE_ETH,
315 RTE_FLOW_ITEM_TYPE_IPV6,
316 RTE_FLOW_ITEM_TYPE_NVGRE,
317 RTE_FLOW_ITEM_TYPE_ETH,
318 RTE_FLOW_ITEM_TYPE_END,
321 static enum rte_flow_item_type pattern_nvgre_3[] = {
322 RTE_FLOW_ITEM_TYPE_ETH,
323 RTE_FLOW_ITEM_TYPE_IPV4,
324 RTE_FLOW_ITEM_TYPE_NVGRE,
325 RTE_FLOW_ITEM_TYPE_ETH,
326 RTE_FLOW_ITEM_TYPE_VLAN,
327 RTE_FLOW_ITEM_TYPE_END,
330 static enum rte_flow_item_type pattern_nvgre_4[] = {
331 RTE_FLOW_ITEM_TYPE_ETH,
332 RTE_FLOW_ITEM_TYPE_IPV6,
333 RTE_FLOW_ITEM_TYPE_NVGRE,
334 RTE_FLOW_ITEM_TYPE_ETH,
335 RTE_FLOW_ITEM_TYPE_VLAN,
336 RTE_FLOW_ITEM_TYPE_END,
339 static enum rte_flow_item_type pattern_mpls_1[] = {
340 RTE_FLOW_ITEM_TYPE_ETH,
341 RTE_FLOW_ITEM_TYPE_IPV4,
342 RTE_FLOW_ITEM_TYPE_UDP,
343 RTE_FLOW_ITEM_TYPE_MPLS,
344 RTE_FLOW_ITEM_TYPE_END,
347 static enum rte_flow_item_type pattern_mpls_2[] = {
348 RTE_FLOW_ITEM_TYPE_ETH,
349 RTE_FLOW_ITEM_TYPE_IPV6,
350 RTE_FLOW_ITEM_TYPE_UDP,
351 RTE_FLOW_ITEM_TYPE_MPLS,
352 RTE_FLOW_ITEM_TYPE_END,
355 static enum rte_flow_item_type pattern_mpls_3[] = {
356 RTE_FLOW_ITEM_TYPE_ETH,
357 RTE_FLOW_ITEM_TYPE_IPV4,
358 RTE_FLOW_ITEM_TYPE_GRE,
359 RTE_FLOW_ITEM_TYPE_MPLS,
360 RTE_FLOW_ITEM_TYPE_END,
363 static enum rte_flow_item_type pattern_mpls_4[] = {
364 RTE_FLOW_ITEM_TYPE_ETH,
365 RTE_FLOW_ITEM_TYPE_IPV6,
366 RTE_FLOW_ITEM_TYPE_GRE,
367 RTE_FLOW_ITEM_TYPE_MPLS,
368 RTE_FLOW_ITEM_TYPE_END,
371 static enum rte_flow_item_type pattern_qinq_1[] = {
372 RTE_FLOW_ITEM_TYPE_ETH,
373 RTE_FLOW_ITEM_TYPE_VLAN,
374 RTE_FLOW_ITEM_TYPE_VLAN,
375 RTE_FLOW_ITEM_TYPE_END,
378 static struct i40e_valid_pattern i40e_supported_patterns[] = {
380 { pattern_ethertype, i40e_flow_parse_ethertype_filter },
382 { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
383 { pattern_fdir_ipv4_ext, i40e_flow_parse_fdir_filter },
384 { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
385 { pattern_fdir_ipv4_udp_ext, i40e_flow_parse_fdir_filter },
386 { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
387 { pattern_fdir_ipv4_tcp_ext, i40e_flow_parse_fdir_filter },
388 { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
389 { pattern_fdir_ipv4_sctp_ext, i40e_flow_parse_fdir_filter },
390 { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
391 { pattern_fdir_ipv6_ext, i40e_flow_parse_fdir_filter },
392 { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
393 { pattern_fdir_ipv6_udp_ext, i40e_flow_parse_fdir_filter },
394 { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
395 { pattern_fdir_ipv6_tcp_ext, i40e_flow_parse_fdir_filter },
396 { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
397 { pattern_fdir_ipv6_sctp_ext, i40e_flow_parse_fdir_filter },
399 { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
400 { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
401 { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
402 { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
404 { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
405 { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
406 { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
407 { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
408 /* MPLSoUDP & MPLSoGRE */
409 { pattern_mpls_1, i40e_flow_parse_mpls_filter },
410 { pattern_mpls_2, i40e_flow_parse_mpls_filter },
411 { pattern_mpls_3, i40e_flow_parse_mpls_filter },
412 { pattern_mpls_4, i40e_flow_parse_mpls_filter },
414 { pattern_qinq_1, i40e_flow_parse_qinq_filter },
/* Advance "act" to actions[index], then skip any VOID actions;
 * "index" is left pointing at the returned non-VOID action.
 */
#define NEXT_ITEM_OF_ACTION(act, actions, index)                        \
	do {                                                            \
		act = actions + index;                                  \
		while (act->type == RTE_FLOW_ACTION_TYPE_VOID) {        \
			index++;                                        \
			act = actions + index;                          \
		}                                                       \
	} while (0)
426 /* Find the first VOID or non-VOID item pointer */
427 static const struct rte_flow_item *
428 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
432 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
434 is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
436 is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
444 /* Skip all VOID items of the pattern */
446 i40e_pattern_skip_void_item(struct rte_flow_item *items,
447 const struct rte_flow_item *pattern)
449 uint32_t cpy_count = 0;
450 const struct rte_flow_item *pb = pattern, *pe = pattern;
453 /* Find a non-void item first */
454 pb = i40e_find_first_item(pb, false);
455 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
460 /* Find a void item */
461 pe = i40e_find_first_item(pb + 1, true);
464 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
468 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
475 /* Copy the END item. */
476 rte_memcpy(items, pe, sizeof(struct rte_flow_item));
479 /* Check if the pattern matches a supported item type array */
481 i40e_match_pattern(enum rte_flow_item_type *item_array,
482 struct rte_flow_item *pattern)
484 struct rte_flow_item *item = pattern;
486 while ((*item_array == item->type) &&
487 (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
492 return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
493 item->type == RTE_FLOW_ITEM_TYPE_END);
496 /* Find if there's parse filter function matched */
497 static parse_filter_t
498 i40e_find_parse_filter_func(struct rte_flow_item *pattern)
500 parse_filter_t parse_filter = NULL;
503 for (; i < RTE_DIM(i40e_supported_patterns); i++) {
504 if (i40e_match_pattern(i40e_supported_patterns[i].items,
506 parse_filter = i40e_supported_patterns[i].parse_filter;
514 /* Parse attributes */
516 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
517 struct rte_flow_error *error)
519 /* Must be input direction */
520 if (!attr->ingress) {
521 rte_flow_error_set(error, EINVAL,
522 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
523 attr, "Only support ingress.");
529 rte_flow_error_set(error, EINVAL,
530 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
531 attr, "Not support egress.");
536 if (attr->priority) {
537 rte_flow_error_set(error, EINVAL,
538 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
539 attr, "Not support priority.");
545 rte_flow_error_set(error, EINVAL,
546 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
547 attr, "Not support group.");
555 i40e_get_outer_vlan(struct rte_eth_dev *dev)
557 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
558 int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
568 i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
571 tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
576 /* 1. Last in item should be NULL as range is not supported.
577 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
578 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
579 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
581 * 5. Ether_type mask should be 0xFFFF.
584 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
585 const struct rte_flow_item *pattern,
586 struct rte_flow_error *error,
587 struct rte_eth_ethertype_filter *filter)
589 const struct rte_flow_item *item = pattern;
590 const struct rte_flow_item_eth *eth_spec;
591 const struct rte_flow_item_eth *eth_mask;
592 enum rte_flow_item_type item_type;
595 outer_tpid = i40e_get_outer_vlan(dev);
597 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
599 rte_flow_error_set(error, EINVAL,
600 RTE_FLOW_ERROR_TYPE_ITEM,
602 "Not support range");
605 item_type = item->type;
607 case RTE_FLOW_ITEM_TYPE_ETH:
608 eth_spec = (const struct rte_flow_item_eth *)item->spec;
609 eth_mask = (const struct rte_flow_item_eth *)item->mask;
610 /* Get the MAC info. */
611 if (!eth_spec || !eth_mask) {
612 rte_flow_error_set(error, EINVAL,
613 RTE_FLOW_ERROR_TYPE_ITEM,
615 "NULL ETH spec/mask");
619 /* Mask bits of source MAC address must be full of 0.
620 * Mask bits of destination MAC address must be full
623 if (!is_zero_ether_addr(ð_mask->src) ||
624 (!is_zero_ether_addr(ð_mask->dst) &&
625 !is_broadcast_ether_addr(ð_mask->dst))) {
626 rte_flow_error_set(error, EINVAL,
627 RTE_FLOW_ERROR_TYPE_ITEM,
629 "Invalid MAC_addr mask");
633 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
634 rte_flow_error_set(error, EINVAL,
635 RTE_FLOW_ERROR_TYPE_ITEM,
637 "Invalid ethertype mask");
641 /* If mask bits of destination MAC address
642 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
644 if (is_broadcast_ether_addr(ð_mask->dst)) {
645 filter->mac_addr = eth_spec->dst;
646 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
648 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
650 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
652 if (filter->ether_type == ETHER_TYPE_IPv4 ||
653 filter->ether_type == ETHER_TYPE_IPv6 ||
654 filter->ether_type == ETHER_TYPE_LLDP ||
655 filter->ether_type == outer_tpid) {
656 rte_flow_error_set(error, EINVAL,
657 RTE_FLOW_ERROR_TYPE_ITEM,
659 "Unsupported ether_type in"
660 " control packet filter.");
672 /* Ethertype action only supports QUEUE or DROP. */
674 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
675 const struct rte_flow_action *actions,
676 struct rte_flow_error *error,
677 struct rte_eth_ethertype_filter *filter)
679 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
680 const struct rte_flow_action *act;
681 const struct rte_flow_action_queue *act_q;
684 /* Check if the first non-void action is QUEUE or DROP. */
685 NEXT_ITEM_OF_ACTION(act, actions, index);
686 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
687 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
688 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
689 act, "Not supported action.");
693 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
694 act_q = (const struct rte_flow_action_queue *)act->conf;
695 filter->queue = act_q->index;
696 if (filter->queue >= pf->dev_data->nb_rx_queues) {
697 rte_flow_error_set(error, EINVAL,
698 RTE_FLOW_ERROR_TYPE_ACTION,
699 act, "Invalid queue ID for"
700 " ethertype_filter.");
704 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
707 /* Check if the next non-void item is END */
709 NEXT_ITEM_OF_ACTION(act, actions, index);
710 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
711 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
712 act, "Not supported action.");
720 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
721 const struct rte_flow_attr *attr,
722 const struct rte_flow_item pattern[],
723 const struct rte_flow_action actions[],
724 struct rte_flow_error *error,
725 union i40e_filter_t *filter)
727 struct rte_eth_ethertype_filter *ethertype_filter =
728 &filter->ethertype_filter;
731 ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
736 ret = i40e_flow_parse_ethertype_action(dev, actions, error,
741 ret = i40e_flow_parse_attr(attr, error);
745 cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
750 /* 1. Last in item should be NULL as range is not supported.
751 * 2. Supported flow type and input set: refer to array
752 * default_inset_table in i40e_ethdev.c.
753 * 3. Mask of fields which need to be matched should be
755 * 4. Mask of fields which needn't to be matched should be
759 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
760 const struct rte_flow_item *pattern,
761 struct rte_flow_error *error,
762 struct rte_eth_fdir_filter *filter)
764 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
765 const struct rte_flow_item *item = pattern;
766 const struct rte_flow_item_eth *eth_spec, *eth_mask;
767 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
768 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
769 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
770 const struct rte_flow_item_udp *udp_spec, *udp_mask;
771 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
772 const struct rte_flow_item_vf *vf_spec;
773 uint32_t flow_type = RTE_ETH_FLOW_UNKNOWN;
774 enum i40e_filter_pctype pctype;
775 uint64_t input_set = I40E_INSET_NONE;
776 uint16_t flag_offset;
777 enum rte_flow_item_type item_type;
778 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
781 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
783 rte_flow_error_set(error, EINVAL,
784 RTE_FLOW_ERROR_TYPE_ITEM,
786 "Not support range");
789 item_type = item->type;
791 case RTE_FLOW_ITEM_TYPE_ETH:
792 eth_spec = (const struct rte_flow_item_eth *)item->spec;
793 eth_mask = (const struct rte_flow_item_eth *)item->mask;
794 if (eth_spec || eth_mask) {
795 rte_flow_error_set(error, EINVAL,
796 RTE_FLOW_ERROR_TYPE_ITEM,
798 "Invalid ETH spec/mask");
802 case RTE_FLOW_ITEM_TYPE_IPV4:
803 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
805 (const struct rte_flow_item_ipv4 *)item->spec;
807 (const struct rte_flow_item_ipv4 *)item->mask;
808 if (!ipv4_spec || !ipv4_mask) {
809 rte_flow_error_set(error, EINVAL,
810 RTE_FLOW_ERROR_TYPE_ITEM,
812 "NULL IPv4 spec/mask");
816 /* Check IPv4 mask and update input set */
817 if (ipv4_mask->hdr.version_ihl ||
818 ipv4_mask->hdr.total_length ||
819 ipv4_mask->hdr.packet_id ||
820 ipv4_mask->hdr.fragment_offset ||
821 ipv4_mask->hdr.hdr_checksum) {
822 rte_flow_error_set(error, EINVAL,
823 RTE_FLOW_ERROR_TYPE_ITEM,
825 "Invalid IPv4 mask.");
829 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
830 input_set |= I40E_INSET_IPV4_SRC;
831 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
832 input_set |= I40E_INSET_IPV4_DST;
833 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
834 input_set |= I40E_INSET_IPV4_TOS;
835 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
836 input_set |= I40E_INSET_IPV4_TTL;
837 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
838 input_set |= I40E_INSET_IPV4_PROTO;
840 /* Get filter info */
841 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
842 /* Check if it is fragment. */
844 rte_be_to_cpu_16(ipv4_spec->hdr.fragment_offset);
845 if (flag_offset & IPV4_HDR_OFFSET_MASK ||
846 flag_offset & IPV4_HDR_MF_FLAG)
847 flow_type = RTE_ETH_FLOW_FRAG_IPV4;
849 /* Get the filter info */
850 filter->input.flow.ip4_flow.proto =
851 ipv4_spec->hdr.next_proto_id;
852 filter->input.flow.ip4_flow.tos =
853 ipv4_spec->hdr.type_of_service;
854 filter->input.flow.ip4_flow.ttl =
855 ipv4_spec->hdr.time_to_live;
856 filter->input.flow.ip4_flow.src_ip =
857 ipv4_spec->hdr.src_addr;
858 filter->input.flow.ip4_flow.dst_ip =
859 ipv4_spec->hdr.dst_addr;
862 case RTE_FLOW_ITEM_TYPE_IPV6:
863 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
865 (const struct rte_flow_item_ipv6 *)item->spec;
867 (const struct rte_flow_item_ipv6 *)item->mask;
868 if (!ipv6_spec || !ipv6_mask) {
869 rte_flow_error_set(error, EINVAL,
870 RTE_FLOW_ERROR_TYPE_ITEM,
872 "NULL IPv6 spec/mask");
876 /* Check IPv6 mask and update input set */
877 if (ipv6_mask->hdr.payload_len) {
878 rte_flow_error_set(error, EINVAL,
879 RTE_FLOW_ERROR_TYPE_ITEM,
881 "Invalid IPv6 mask");
885 /* SCR and DST address of IPv6 shouldn't be masked */
886 for (j = 0; j < RTE_DIM(ipv6_mask->hdr.src_addr); j++) {
887 if (ipv6_mask->hdr.src_addr[j] != UINT8_MAX ||
888 ipv6_mask->hdr.dst_addr[j] != UINT8_MAX) {
889 rte_flow_error_set(error, EINVAL,
890 RTE_FLOW_ERROR_TYPE_ITEM,
892 "Invalid IPv6 mask");
897 input_set |= I40E_INSET_IPV6_SRC;
898 input_set |= I40E_INSET_IPV6_DST;
900 if ((ipv6_mask->hdr.vtc_flow &
901 rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
902 == rte_cpu_to_be_16(I40E_IPV6_TC_MASK))
903 input_set |= I40E_INSET_IPV6_TC;
904 if (ipv6_mask->hdr.proto == UINT8_MAX)
905 input_set |= I40E_INSET_IPV6_NEXT_HDR;
906 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
907 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
909 /* Get filter info */
910 filter->input.flow.ipv6_flow.tc =
911 (uint8_t)(ipv6_spec->hdr.vtc_flow <<
913 filter->input.flow.ipv6_flow.proto =
914 ipv6_spec->hdr.proto;
915 filter->input.flow.ipv6_flow.hop_limits =
916 ipv6_spec->hdr.hop_limits;
918 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
919 ipv6_spec->hdr.src_addr, 16);
920 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
921 ipv6_spec->hdr.dst_addr, 16);
923 /* Check if it is fragment. */
924 if (ipv6_spec->hdr.proto == I40E_IPV6_FRAG_HEADER)
925 flow_type = RTE_ETH_FLOW_FRAG_IPV6;
927 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
929 case RTE_FLOW_ITEM_TYPE_TCP:
930 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
931 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
932 if (!tcp_spec || !tcp_mask) {
933 rte_flow_error_set(error, EINVAL,
934 RTE_FLOW_ERROR_TYPE_ITEM,
936 "NULL TCP spec/mask");
940 /* Check TCP mask and update input set */
941 if (tcp_mask->hdr.sent_seq ||
942 tcp_mask->hdr.recv_ack ||
943 tcp_mask->hdr.data_off ||
944 tcp_mask->hdr.tcp_flags ||
945 tcp_mask->hdr.rx_win ||
946 tcp_mask->hdr.cksum ||
947 tcp_mask->hdr.tcp_urp) {
948 rte_flow_error_set(error, EINVAL,
949 RTE_FLOW_ERROR_TYPE_ITEM,
955 if (tcp_mask->hdr.src_port != UINT16_MAX ||
956 tcp_mask->hdr.dst_port != UINT16_MAX) {
957 rte_flow_error_set(error, EINVAL,
958 RTE_FLOW_ERROR_TYPE_ITEM,
964 input_set |= I40E_INSET_SRC_PORT;
965 input_set |= I40E_INSET_DST_PORT;
967 /* Get filter info */
968 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
969 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
970 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
971 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_TCP;
973 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
974 filter->input.flow.tcp4_flow.src_port =
975 tcp_spec->hdr.src_port;
976 filter->input.flow.tcp4_flow.dst_port =
977 tcp_spec->hdr.dst_port;
978 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
979 filter->input.flow.tcp6_flow.src_port =
980 tcp_spec->hdr.src_port;
981 filter->input.flow.tcp6_flow.dst_port =
982 tcp_spec->hdr.dst_port;
985 case RTE_FLOW_ITEM_TYPE_UDP:
986 udp_spec = (const struct rte_flow_item_udp *)item->spec;
987 udp_mask = (const struct rte_flow_item_udp *)item->mask;
988 if (!udp_spec || !udp_mask) {
989 rte_flow_error_set(error, EINVAL,
990 RTE_FLOW_ERROR_TYPE_ITEM,
992 "NULL UDP spec/mask");
996 /* Check UDP mask and update input set*/
997 if (udp_mask->hdr.dgram_len ||
998 udp_mask->hdr.dgram_cksum) {
999 rte_flow_error_set(error, EINVAL,
1000 RTE_FLOW_ERROR_TYPE_ITEM,
1002 "Invalid UDP mask");
1006 if (udp_mask->hdr.src_port != UINT16_MAX ||
1007 udp_mask->hdr.dst_port != UINT16_MAX) {
1008 rte_flow_error_set(error, EINVAL,
1009 RTE_FLOW_ERROR_TYPE_ITEM,
1011 "Invalid UDP mask");
1015 input_set |= I40E_INSET_SRC_PORT;
1016 input_set |= I40E_INSET_DST_PORT;
1018 /* Get filter info */
1019 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1021 RTE_ETH_FLOW_NONFRAG_IPV4_UDP;
1022 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1024 RTE_ETH_FLOW_NONFRAG_IPV6_UDP;
1026 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1027 filter->input.flow.udp4_flow.src_port =
1028 udp_spec->hdr.src_port;
1029 filter->input.flow.udp4_flow.dst_port =
1030 udp_spec->hdr.dst_port;
1031 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1032 filter->input.flow.udp6_flow.src_port =
1033 udp_spec->hdr.src_port;
1034 filter->input.flow.udp6_flow.dst_port =
1035 udp_spec->hdr.dst_port;
1038 case RTE_FLOW_ITEM_TYPE_SCTP:
1040 (const struct rte_flow_item_sctp *)item->spec;
1042 (const struct rte_flow_item_sctp *)item->mask;
1043 if (!sctp_spec || !sctp_mask) {
1044 rte_flow_error_set(error, EINVAL,
1045 RTE_FLOW_ERROR_TYPE_ITEM,
1047 "NULL SCTP spec/mask");
1051 /* Check SCTP mask and update input set */
1052 if (sctp_mask->hdr.cksum) {
1053 rte_flow_error_set(error, EINVAL,
1054 RTE_FLOW_ERROR_TYPE_ITEM,
1056 "Invalid UDP mask");
1060 if (sctp_mask->hdr.src_port != UINT16_MAX ||
1061 sctp_mask->hdr.dst_port != UINT16_MAX ||
1062 sctp_mask->hdr.tag != UINT32_MAX) {
1063 rte_flow_error_set(error, EINVAL,
1064 RTE_FLOW_ERROR_TYPE_ITEM,
1066 "Invalid UDP mask");
1069 input_set |= I40E_INSET_SRC_PORT;
1070 input_set |= I40E_INSET_DST_PORT;
1071 input_set |= I40E_INSET_SCTP_VT;
1073 /* Get filter info */
1074 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
1075 flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_SCTP;
1076 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
1077 flow_type = RTE_ETH_FLOW_NONFRAG_IPV6_SCTP;
1079 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
1080 filter->input.flow.sctp4_flow.src_port =
1081 sctp_spec->hdr.src_port;
1082 filter->input.flow.sctp4_flow.dst_port =
1083 sctp_spec->hdr.dst_port;
1084 filter->input.flow.sctp4_flow.verify_tag =
1086 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
1087 filter->input.flow.sctp6_flow.src_port =
1088 sctp_spec->hdr.src_port;
1089 filter->input.flow.sctp6_flow.dst_port =
1090 sctp_spec->hdr.dst_port;
1091 filter->input.flow.sctp6_flow.verify_tag =
1095 case RTE_FLOW_ITEM_TYPE_VF:
1096 vf_spec = (const struct rte_flow_item_vf *)item->spec;
1097 filter->input.flow_ext.is_vf = 1;
1098 filter->input.flow_ext.dst_id = vf_spec->id;
1099 if (filter->input.flow_ext.is_vf &&
1100 filter->input.flow_ext.dst_id >= pf->vf_num) {
1101 rte_flow_error_set(error, EINVAL,
1102 RTE_FLOW_ERROR_TYPE_ITEM,
1104 "Invalid VF ID for FDIR.");
1113 pctype = i40e_flowtype_to_pctype(flow_type);
1114 if (pctype == 0 || pctype > I40E_FILTER_PCTYPE_L2_PAYLOAD) {
1115 rte_flow_error_set(error, EINVAL,
1116 RTE_FLOW_ERROR_TYPE_ITEM, item,
1117 "Unsupported flow type");
1121 if (input_set != i40e_get_default_input_set(pctype)) {
1122 rte_flow_error_set(error, EINVAL,
1123 RTE_FLOW_ERROR_TYPE_ITEM, item,
1124 "Invalid input set.");
1127 filter->input.flow_type = flow_type;
1132 /* Parse to get the action info of a FDIR filter.
1133 * FDIR action supports QUEUE or (QUEUE + MARK).
/* NOTE(review): this numbered listing elides some original lines (the
 * embedded line numbers jump); comments below describe only the visible
 * logic. Validates the action list and fills filter->action/soft_id.
 */
1136 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
1137 const struct rte_flow_action *actions,
1138 struct rte_flow_error *error,
1139 struct rte_eth_fdir_filter *filter)
1141 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1142 const struct rte_flow_action *act;
1143 const struct rte_flow_action_queue *act_q;
1144 const struct rte_flow_action_mark *mark_spec;
1147 /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
1148 NEXT_ITEM_OF_ACTION(act, actions, index);
1149 switch (act->type) {
1150 case RTE_FLOW_ACTION_TYPE_QUEUE:
1151 act_q = (const struct rte_flow_action_queue *)act->conf;
1152 filter->action.rx_queue = act_q->index;
/* Reject queue indices beyond the device's configured RX queue count. */
1153 if (filter->action.rx_queue >= pf->dev_data->nb_rx_queues) {
1154 rte_flow_error_set(error, EINVAL,
1155 RTE_FLOW_ERROR_TYPE_ACTION, act,
1156 "Invalid queue ID for FDIR.");
1159 filter->action.behavior = RTE_ETH_FDIR_ACCEPT;
1161 case RTE_FLOW_ACTION_TYPE_DROP:
1162 filter->action.behavior = RTE_ETH_FDIR_REJECT;
1164 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
1165 filter->action.behavior = RTE_ETH_FDIR_PASSTHRU;
1168 rte_flow_error_set(error, EINVAL,
1169 RTE_FLOW_ERROR_TYPE_ACTION, act,
1174 /* Check if the next non-void item is MARK or FLAG or END. */
1176 NEXT_ITEM_OF_ACTION(act, actions, index);
1177 switch (act->type) {
1178 case RTE_FLOW_ACTION_TYPE_MARK:
/* MARK: report the flow director ID and keep the user-supplied soft id. */
1179 mark_spec = (const struct rte_flow_action_mark *)act->conf;
1180 filter->action.report_status = RTE_ETH_FDIR_REPORT_ID;
1181 filter->soft_id = mark_spec->id;
1183 case RTE_FLOW_ACTION_TYPE_FLAG:
1184 filter->action.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS;
1186 case RTE_FLOW_ACTION_TYPE_END:
1189 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1190 act, "Invalid action.");
1194 /* Check if the next non-void item is END */
1196 NEXT_ITEM_OF_ACTION(act, actions, index);
1197 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1198 rte_flow_error_set(error, EINVAL,
1199 RTE_FLOW_ERROR_TYPE_ACTION,
1200 act, "Invalid action.");
/* Top-level FDIR parser: parses pattern, then actions, then attributes
 * into filter->fdir_filter, and records the consistent filter type.
 * Rejects devices not configured in perfect-match FDIR mode.
 * NOTE(review): listing elides some original lines; comments cover the
 * visible logic only.
 */
1208 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
1209 const struct rte_flow_attr *attr,
1210 const struct rte_flow_item pattern[],
1211 const struct rte_flow_action actions[],
1212 struct rte_flow_error *error,
1213 union i40e_filter_t *filter)
1215 struct rte_eth_fdir_filter *fdir_filter =
1216 &filter->fdir_filter;
1219 ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
1223 ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
1227 ret = i40e_flow_parse_attr(attr, error);
/* Remember which union member of cons_filter is now valid. */
1231 cons_filter_type = RTE_ETH_FILTER_FDIR;
1233 if (dev->data->dev_conf.fdir_conf.mode !=
1234 RTE_FDIR_MODE_PERFECT) {
1235 rte_flow_error_set(error, ENOTSUP,
1236 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1238 "Check the mode in fdir_conf.");
1245 /* Parse to get the action info of a tunnel filter
1246 * Tunnel action only supports PF, VF and QUEUE.
/* NOTE(review): listing elides some original lines; comments cover the
 * visible logic only. Fills filter->is_to_vf/vf_id/queue_id and validates
 * IDs against PF limits.
 */
1249 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
1250 const struct rte_flow_action *actions,
1251 struct rte_flow_error *error,
1252 struct i40e_tunnel_filter_conf *filter)
1254 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
1255 const struct rte_flow_action *act;
1256 const struct rte_flow_action_queue *act_q;
1257 const struct rte_flow_action_vf *act_vf;
1260 /* Check if the first non-void action is PF or VF. */
1261 NEXT_ITEM_OF_ACTION(act, actions, index);
1262 if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
1263 act->type != RTE_FLOW_ACTION_TYPE_VF) {
1264 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1265 act, "Not supported action.");
/* VF action: route to the given VF; id must be below the PF's VF count. */
1269 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
1270 act_vf = (const struct rte_flow_action_vf *)act->conf;
1271 filter->vf_id = act_vf->id;
1272 filter->is_to_vf = 1;
1273 if (filter->vf_id >= pf->vf_num) {
1274 rte_flow_error_set(error, EINVAL,
1275 RTE_FLOW_ERROR_TYPE_ACTION,
1276 act, "Invalid VF ID for tunnel filter");
1281 /* Check if the next non-void item is QUEUE */
1283 NEXT_ITEM_OF_ACTION(act, actions, index);
1284 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1285 act_q = (const struct rte_flow_action_queue *)act->conf;
1286 filter->queue_id = act_q->index;
/* Queue bound depends on destination: PF RX queues vs per-VF queue pairs. */
1287 if ((!filter->is_to_vf) &&
1288 (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
1289 rte_flow_error_set(error, EINVAL,
1290 RTE_FLOW_ERROR_TYPE_ACTION,
1291 act, "Invalid queue ID for tunnel filter");
1293 } else if (filter->is_to_vf &&
1294 (filter->queue_id >= pf->vf_nb_qps)) {
1295 rte_flow_error_set(error, EINVAL,
1296 RTE_FLOW_ERROR_TYPE_ACTION,
1297 act, "Invalid queue ID for tunnel filter");
1302 /* Check if the next non-void item is END */
1304 NEXT_ITEM_OF_ACTION(act, actions, index);
1305 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
1306 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
1307 act, "Not supported action.");
/* Field combinations the i40e tunnel filter supports: IMAC+TENID+IVLAN,
 * IMAC+IVLAN, IMAC+TENID, OMAC+TENID+IMAC, and IMAC alone.
 */
1314 static uint16_t i40e_supported_tunnel_filter_types[] = {
1315 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
1316 ETH_TUNNEL_FILTER_IVLAN,
1317 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
1318 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
1319 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
1320 ETH_TUNNEL_FILTER_IMAC,
1321 ETH_TUNNEL_FILTER_IMAC,
/* Linear scan of the supported-combination table for filter_type.
 * NOTE(review): the listing elides the return statements; visible code
 * only shows the matching loop.
 */
1325 i40e_check_tunnel_filter_type(uint8_t filter_type)
1329 for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
1330 if (filter_type == i40e_supported_tunnel_filter_types[i])
1337 /* 1. Last in item should be NULL as range is not supported.
1338 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1339 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1340 * 3. Mask of fields which need to be matched should be
1342 * 4. Mask of fields which needn't to be matched should be
1346 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
1347 const struct rte_flow_item *pattern,
1348 struct rte_flow_error *error,
1349 struct i40e_tunnel_filter_conf *filter)
1351 const struct rte_flow_item *item = pattern;
1352 const struct rte_flow_item_eth *eth_spec;
1353 const struct rte_flow_item_eth *eth_mask;
1354 const struct rte_flow_item_vxlan *vxlan_spec;
1355 const struct rte_flow_item_vxlan *vxlan_mask;
1356 const struct rte_flow_item_vlan *vlan_spec;
1357 const struct rte_flow_item_vlan *vlan_mask;
1358 uint8_t filter_type = 0;
1359 bool is_vni_masked = 0;
1360 uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
1361 enum rte_flow_item_type item_type;
1362 bool vxlan_flag = 0;
1363 uint32_t tenant_id_be = 0;
1366 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1368 rte_flow_error_set(error, EINVAL,
1369 RTE_FLOW_ERROR_TYPE_ITEM,
1371 "Not support range");
1374 item_type = item->type;
1375 switch (item_type) {
1376 case RTE_FLOW_ITEM_TYPE_ETH:
1377 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1378 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1380 /* Check if ETH item is used for place holder.
1381 * If yes, both spec and mask should be NULL.
1382 * If no, both spec and mask shouldn't be NULL.
1384 if ((!eth_spec && eth_mask) ||
1385 (eth_spec && !eth_mask)) {
1386 rte_flow_error_set(error, EINVAL,
1387 RTE_FLOW_ERROR_TYPE_ITEM,
1389 "Invalid ether spec/mask");
1393 if (eth_spec && eth_mask) {
1394 /* DST address of inner MAC shouldn't be masked.
1395 * SRC address of Inner MAC should be masked.
1397 if (!is_broadcast_ether_addr(ð_mask->dst) ||
1398 !is_zero_ether_addr(ð_mask->src) ||
1400 rte_flow_error_set(error, EINVAL,
1401 RTE_FLOW_ERROR_TYPE_ITEM,
1403 "Invalid ether spec/mask");
1408 rte_memcpy(&filter->outer_mac,
1411 filter_type |= ETH_TUNNEL_FILTER_OMAC;
1413 rte_memcpy(&filter->inner_mac,
1416 filter_type |= ETH_TUNNEL_FILTER_IMAC;
1420 case RTE_FLOW_ITEM_TYPE_VLAN:
1422 (const struct rte_flow_item_vlan *)item->spec;
1424 (const struct rte_flow_item_vlan *)item->mask;
1425 if (!(vlan_spec && vlan_mask)) {
1426 rte_flow_error_set(error, EINVAL,
1427 RTE_FLOW_ERROR_TYPE_ITEM,
1429 "Invalid vlan item");
1433 if (vlan_spec && vlan_mask) {
1434 if (vlan_mask->tci ==
1435 rte_cpu_to_be_16(I40E_TCI_MASK))
1436 filter->inner_vlan =
1437 rte_be_to_cpu_16(vlan_spec->tci) &
1439 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1442 case RTE_FLOW_ITEM_TYPE_IPV4:
1443 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1444 /* IPv4 is used to describe protocol,
1445 * spec and mask should be NULL.
1447 if (item->spec || item->mask) {
1448 rte_flow_error_set(error, EINVAL,
1449 RTE_FLOW_ERROR_TYPE_ITEM,
1451 "Invalid IPv4 item");
1455 case RTE_FLOW_ITEM_TYPE_IPV6:
1456 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1457 /* IPv6 is used to describe protocol,
1458 * spec and mask should be NULL.
1460 if (item->spec || item->mask) {
1461 rte_flow_error_set(error, EINVAL,
1462 RTE_FLOW_ERROR_TYPE_ITEM,
1464 "Invalid IPv6 item");
1468 case RTE_FLOW_ITEM_TYPE_UDP:
1469 /* UDP is used to describe protocol,
1470 * spec and mask should be NULL.
1472 if (item->spec || item->mask) {
1473 rte_flow_error_set(error, EINVAL,
1474 RTE_FLOW_ERROR_TYPE_ITEM,
1476 "Invalid UDP item");
1480 case RTE_FLOW_ITEM_TYPE_VXLAN:
1482 (const struct rte_flow_item_vxlan *)item->spec;
1484 (const struct rte_flow_item_vxlan *)item->mask;
1485 /* Check if VXLAN item is used to describe protocol.
1486 * If yes, both spec and mask should be NULL.
1487 * If no, both spec and mask shouldn't be NULL.
1489 if ((!vxlan_spec && vxlan_mask) ||
1490 (vxlan_spec && !vxlan_mask)) {
1491 rte_flow_error_set(error, EINVAL,
1492 RTE_FLOW_ERROR_TYPE_ITEM,
1494 "Invalid VXLAN item");
1498 /* Check if VNI is masked. */
1499 if (vxlan_spec && vxlan_mask) {
1501 !!memcmp(vxlan_mask->vni, vni_mask,
1503 if (is_vni_masked) {
1504 rte_flow_error_set(error, EINVAL,
1505 RTE_FLOW_ERROR_TYPE_ITEM,
1507 "Invalid VNI mask");
1511 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1512 vxlan_spec->vni, 3);
1514 rte_be_to_cpu_32(tenant_id_be);
1515 filter_type |= ETH_TUNNEL_FILTER_TENID;
1525 ret = i40e_check_tunnel_filter_type(filter_type);
1527 rte_flow_error_set(error, EINVAL,
1528 RTE_FLOW_ERROR_TYPE_ITEM,
1530 "Invalid filter type");
1533 filter->filter_type = filter_type;
1535 filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
/* Top-level VXLAN parser: pattern, then tunnel action, then attributes
 * into filter->consistent_tunnel_filter; records filter type TUNNEL.
 * NOTE(review): listing elides some original lines (early returns etc.).
 */
1541 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
1542 const struct rte_flow_attr *attr,
1543 const struct rte_flow_item pattern[],
1544 const struct rte_flow_action actions[],
1545 struct rte_flow_error *error,
1546 union i40e_filter_t *filter)
1548 struct i40e_tunnel_filter_conf *tunnel_filter =
1549 &filter->consistent_tunnel_filter;
1552 ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
1553 error, tunnel_filter);
1557 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1561 ret = i40e_flow_parse_attr(attr, error);
1565 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1570 /* 1. Last in item should be NULL as range is not supported.
1571 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
1572 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
1573 * 3. Mask of fields which need to be matched should be
1575 * 4. Mask of fields which needn't to be matched should be
1579 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
1580 const struct rte_flow_item *pattern,
1581 struct rte_flow_error *error,
1582 struct i40e_tunnel_filter_conf *filter)
1584 const struct rte_flow_item *item = pattern;
1585 const struct rte_flow_item_eth *eth_spec;
1586 const struct rte_flow_item_eth *eth_mask;
1587 const struct rte_flow_item_nvgre *nvgre_spec;
1588 const struct rte_flow_item_nvgre *nvgre_mask;
1589 const struct rte_flow_item_vlan *vlan_spec;
1590 const struct rte_flow_item_vlan *vlan_mask;
1591 enum rte_flow_item_type item_type;
1592 uint8_t filter_type = 0;
1593 bool is_tni_masked = 0;
1594 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
1595 bool nvgre_flag = 0;
1596 uint32_t tenant_id_be = 0;
1599 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1601 rte_flow_error_set(error, EINVAL,
1602 RTE_FLOW_ERROR_TYPE_ITEM,
1604 "Not support range");
1607 item_type = item->type;
1608 switch (item_type) {
1609 case RTE_FLOW_ITEM_TYPE_ETH:
1610 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1611 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1613 /* Check if ETH item is used for place holder.
1614 * If yes, both spec and mask should be NULL.
1615 * If no, both spec and mask shouldn't be NULL.
1617 if ((!eth_spec && eth_mask) ||
1618 (eth_spec && !eth_mask)) {
1619 rte_flow_error_set(error, EINVAL,
1620 RTE_FLOW_ERROR_TYPE_ITEM,
1622 "Invalid ether spec/mask");
1626 if (eth_spec && eth_mask) {
1627 /* DST address of inner MAC shouldn't be masked.
1628 * SRC address of Inner MAC should be masked.
1630 if (!is_broadcast_ether_addr(ð_mask->dst) ||
1631 !is_zero_ether_addr(ð_mask->src) ||
1633 rte_flow_error_set(error, EINVAL,
1634 RTE_FLOW_ERROR_TYPE_ITEM,
1636 "Invalid ether spec/mask");
1641 rte_memcpy(&filter->outer_mac,
1644 filter_type |= ETH_TUNNEL_FILTER_OMAC;
1646 rte_memcpy(&filter->inner_mac,
1649 filter_type |= ETH_TUNNEL_FILTER_IMAC;
1654 case RTE_FLOW_ITEM_TYPE_VLAN:
1656 (const struct rte_flow_item_vlan *)item->spec;
1658 (const struct rte_flow_item_vlan *)item->mask;
1659 if (!(vlan_spec && vlan_mask)) {
1660 rte_flow_error_set(error, EINVAL,
1661 RTE_FLOW_ERROR_TYPE_ITEM,
1663 "Invalid vlan item");
1667 if (vlan_spec && vlan_mask) {
1668 if (vlan_mask->tci ==
1669 rte_cpu_to_be_16(I40E_TCI_MASK))
1670 filter->inner_vlan =
1671 rte_be_to_cpu_16(vlan_spec->tci) &
1673 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
1676 case RTE_FLOW_ITEM_TYPE_IPV4:
1677 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1678 /* IPv4 is used to describe protocol,
1679 * spec and mask should be NULL.
1681 if (item->spec || item->mask) {
1682 rte_flow_error_set(error, EINVAL,
1683 RTE_FLOW_ERROR_TYPE_ITEM,
1685 "Invalid IPv4 item");
1689 case RTE_FLOW_ITEM_TYPE_IPV6:
1690 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1691 /* IPv6 is used to describe protocol,
1692 * spec and mask should be NULL.
1694 if (item->spec || item->mask) {
1695 rte_flow_error_set(error, EINVAL,
1696 RTE_FLOW_ERROR_TYPE_ITEM,
1698 "Invalid IPv6 item");
1702 case RTE_FLOW_ITEM_TYPE_NVGRE:
1704 (const struct rte_flow_item_nvgre *)item->spec;
1706 (const struct rte_flow_item_nvgre *)item->mask;
1707 /* Check if NVGRE item is used to describe protocol.
1708 * If yes, both spec and mask should be NULL.
1709 * If no, both spec and mask shouldn't be NULL.
1711 if ((!nvgre_spec && nvgre_mask) ||
1712 (nvgre_spec && !nvgre_mask)) {
1713 rte_flow_error_set(error, EINVAL,
1714 RTE_FLOW_ERROR_TYPE_ITEM,
1716 "Invalid NVGRE item");
1720 if (nvgre_spec && nvgre_mask) {
1722 !!memcmp(nvgre_mask->tni, tni_mask,
1724 if (is_tni_masked) {
1725 rte_flow_error_set(error, EINVAL,
1726 RTE_FLOW_ERROR_TYPE_ITEM,
1728 "Invalid TNI mask");
1731 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
1732 nvgre_spec->tni, 3);
1734 rte_be_to_cpu_32(tenant_id_be);
1735 filter_type |= ETH_TUNNEL_FILTER_TENID;
1745 ret = i40e_check_tunnel_filter_type(filter_type);
1747 rte_flow_error_set(error, EINVAL,
1748 RTE_FLOW_ERROR_TYPE_ITEM,
1750 "Invalid filter type");
1753 filter->filter_type = filter_type;
1755 filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
/* Top-level NVGRE parser: pattern, then tunnel action, then attributes
 * into filter->consistent_tunnel_filter; records filter type TUNNEL.
 * NOTE(review): listing elides some original lines (early returns etc.).
 */
1761 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
1762 const struct rte_flow_attr *attr,
1763 const struct rte_flow_item pattern[],
1764 const struct rte_flow_action actions[],
1765 struct rte_flow_error *error,
1766 union i40e_filter_t *filter)
1768 struct i40e_tunnel_filter_conf *tunnel_filter =
1769 &filter->consistent_tunnel_filter;
1772 ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
1773 error, tunnel_filter);
1777 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1781 ret = i40e_flow_parse_attr(attr, error);
1785 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1790 /* 1. Last in item should be NULL as range is not supported.
1791 * 2. Supported filter types: MPLS label.
1792 * 3. Mask of fields which need to be matched should be
1794 * 4. Mask of fields which needn't to be matched should be
/* Parses an MPLSoUDP/MPLSoGRE pattern: ETH/IP/UDP/GRE items are protocol
 * placeholders (spec/mask must be NULL); the MPLS item supplies the
 * 20-bit label, stored in filter->tenant_id. The encapsulation (UDP vs
 * GRE) selects the tunnel_type at the end.
 * NOTE(review): listing elides some original lines; comments cover the
 * visible logic only.
 */
1798 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
1799 const struct rte_flow_item *pattern,
1800 struct rte_flow_error *error,
1801 struct i40e_tunnel_filter_conf *filter)
1803 const struct rte_flow_item *item = pattern;
1804 const struct rte_flow_item_mpls *mpls_spec;
1805 const struct rte_flow_item_mpls *mpls_mask;
1806 enum rte_flow_item_type item_type;
1807 bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
/* Label occupies the top 20 bits of label_tc_s: 0xFF 0xFF 0xF0. */
1808 const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
1809 uint32_t label_be = 0;
1811 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1813 rte_flow_error_set(error, EINVAL,
1814 RTE_FLOW_ERROR_TYPE_ITEM,
1816 "Not support range");
1819 item_type = item->type;
1820 switch (item_type) {
1821 case RTE_FLOW_ITEM_TYPE_ETH:
1822 if (item->spec || item->mask) {
1823 rte_flow_error_set(error, EINVAL,
1824 RTE_FLOW_ERROR_TYPE_ITEM,
1826 "Invalid ETH item");
1830 case RTE_FLOW_ITEM_TYPE_IPV4:
1831 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
1832 /* IPv4 is used to describe protocol,
1833 * spec and mask should be NULL.
1835 if (item->spec || item->mask) {
1836 rte_flow_error_set(error, EINVAL,
1837 RTE_FLOW_ERROR_TYPE_ITEM,
1839 "Invalid IPv4 item");
1843 case RTE_FLOW_ITEM_TYPE_IPV6:
1844 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
1845 /* IPv6 is used to describe protocol,
1846 * spec and mask should be NULL.
1848 if (item->spec || item->mask) {
1849 rte_flow_error_set(error, EINVAL,
1850 RTE_FLOW_ERROR_TYPE_ITEM,
1852 "Invalid IPv6 item");
1856 case RTE_FLOW_ITEM_TYPE_UDP:
1857 /* UDP is used to describe protocol,
1858 * spec and mask should be NULL.
1860 if (item->spec || item->mask) {
1861 rte_flow_error_set(error, EINVAL,
1862 RTE_FLOW_ERROR_TYPE_ITEM,
1864 "Invalid UDP item");
1869 case RTE_FLOW_ITEM_TYPE_GRE:
1870 /* GRE is used to describe protocol,
1871 * spec and mask should be NULL.
1873 if (item->spec || item->mask) {
1874 rte_flow_error_set(error, EINVAL,
1875 RTE_FLOW_ERROR_TYPE_ITEM,
1877 "Invalid GRE item");
1881 case RTE_FLOW_ITEM_TYPE_MPLS:
1883 (const struct rte_flow_item_mpls *)item->spec;
1885 (const struct rte_flow_item_mpls *)item->mask;
1887 if (!mpls_spec || !mpls_mask) {
1888 rte_flow_error_set(error, EINVAL,
1889 RTE_FLOW_ERROR_TYPE_ITEM,
1891 "Invalid MPLS item");
/* The full 20-bit label must be matched exactly. */
1895 if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
1896 rte_flow_error_set(error, EINVAL,
1897 RTE_FLOW_ERROR_TYPE_ITEM,
1899 "Invalid MPLS label mask");
1902 rte_memcpy(((uint8_t *)&label_be + 1),
1903 mpls_spec->label_tc_s, 3);
/* Shift off the TC+S bits to leave the bare 20-bit label. */
1904 filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
1912 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
1914 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
/* Top-level MPLS parser: pattern, then tunnel action, then attributes
 * into filter->consistent_tunnel_filter; records filter type TUNNEL.
 * NOTE(review): listing elides some original lines (early returns etc.).
 */
1920 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
1921 const struct rte_flow_attr *attr,
1922 const struct rte_flow_item pattern[],
1923 const struct rte_flow_action actions[],
1924 struct rte_flow_error *error,
1925 union i40e_filter_t *filter)
1927 struct i40e_tunnel_filter_conf *tunnel_filter =
1928 &filter->consistent_tunnel_filter;
1931 ret = i40e_flow_parse_mpls_pattern(dev, pattern,
1932 error, tunnel_filter);
1936 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
1940 ret = i40e_flow_parse_attr(attr, error);
1944 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
1949 /* 1. Last in item should be NULL as range is not supported.
1950 * 2. Supported filter types: QINQ.
1951 * 3. Mask of fields which need to be matched should be
1953 * 4. Mask of fields which needn't to be matched should be
/* Parses a QinQ pattern: expects two VLAN items (outer then inner), both
 * with full-TCI masks; fills filter->outer_vlan/inner_vlan and marks the
 * filter as tunnel type QINQ.
 * NOTE(review): listing elides some original lines (including the branch
 * that distinguishes the outer from the inner VLAN item); comments cover
 * the visible logic only.
 */
1957 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
1958 const struct rte_flow_item *pattern,
1959 struct rte_flow_error *error,
1960 struct i40e_tunnel_filter_conf *filter)
1962 const struct rte_flow_item *item = pattern;
1963 const struct rte_flow_item_vlan *vlan_spec = NULL;
1964 const struct rte_flow_item_vlan *vlan_mask = NULL;
1965 const struct rte_flow_item_vlan *i_vlan_spec = NULL;
1966 const struct rte_flow_item_vlan *i_vlan_mask = NULL;
1967 const struct rte_flow_item_vlan *o_vlan_spec = NULL;
1968 const struct rte_flow_item_vlan *o_vlan_mask = NULL;
1970 enum rte_flow_item_type item_type;
1973 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
1975 rte_flow_error_set(error, EINVAL,
1976 RTE_FLOW_ERROR_TYPE_ITEM,
1978 "Not support range");
1981 item_type = item->type;
1982 switch (item_type) {
1983 case RTE_FLOW_ITEM_TYPE_ETH:
1984 if (item->spec || item->mask) {
1985 rte_flow_error_set(error, EINVAL,
1986 RTE_FLOW_ERROR_TYPE_ITEM,
1988 "Invalid ETH item");
1992 case RTE_FLOW_ITEM_TYPE_VLAN:
1994 (const struct rte_flow_item_vlan *)item->spec;
1996 (const struct rte_flow_item_vlan *)item->mask;
1998 if (!(vlan_spec && vlan_mask)) {
1999 rte_flow_error_set(error, EINVAL,
2000 RTE_FLOW_ERROR_TYPE_ITEM,
2002 "Invalid vlan item");
2007 o_vlan_spec = vlan_spec;
2008 o_vlan_mask = vlan_mask;
2011 i_vlan_spec = vlan_spec;
2012 i_vlan_mask = vlan_mask;
2022 /* Get filter specification */
/* Both TCIs must be fully masked for a valid QinQ filter. */
2023 if ((o_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK)) &&
2024 (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
2025 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
2027 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
2030 rte_flow_error_set(error, EINVAL,
2031 RTE_FLOW_ERROR_TYPE_ITEM,
2033 "Invalid filter type");
2037 filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
/* Top-level QinQ parser: pattern, then tunnel action, then attributes
 * into filter->consistent_tunnel_filter; records filter type TUNNEL.
 * NOTE(review): listing elides some original lines (early returns etc.).
 */
2042 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
2043 const struct rte_flow_attr *attr,
2044 const struct rte_flow_item pattern[],
2045 const struct rte_flow_action actions[],
2046 struct rte_flow_error *error,
2047 union i40e_filter_t *filter)
2049 struct i40e_tunnel_filter_conf *tunnel_filter =
2050 &filter->consistent_tunnel_filter;
2053 ret = i40e_flow_parse_qinq_pattern(dev, pattern,
2054 error, tunnel_filter);
2058 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
2062 ret = i40e_flow_parse_attr(attr, error);
2066 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
/* rte_flow validate entry point: sanity-checks pattern/actions/attr for
 * NULL, strips VOID items into a temporary array, dispatches to the
 * matching parse function, which fills the module-level cons_filter.
 * NOTE(review): listing elides some original lines (returns, rte_free of
 * the items array); comments cover the visible logic only.
 */
2072 i40e_flow_validate(struct rte_eth_dev *dev,
2073 const struct rte_flow_attr *attr,
2074 const struct rte_flow_item pattern[],
2075 const struct rte_flow_action actions[],
2076 struct rte_flow_error *error)
2078 struct rte_flow_item *items; /* internal pattern w/o VOID items */
2079 parse_filter_t parse_filter;
2080 uint32_t item_num = 0; /* non-void item number of pattern*/
2085 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2086 NULL, "NULL pattern.");
2091 rte_flow_error_set(error, EINVAL,
2092 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
2093 NULL, "NULL action.");
2098 rte_flow_error_set(error, EINVAL,
2099 RTE_FLOW_ERROR_TYPE_ATTR,
2100 NULL, "NULL attribute.");
2104 memset(&cons_filter, 0, sizeof(cons_filter));
2106 /* Get the non-void item number of pattern */
2107 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
2108 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Scratch array holds the pattern with VOID items removed. */
2114 items = rte_zmalloc("i40e_pattern",
2115 item_num * sizeof(struct rte_flow_item), 0);
2117 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
2118 NULL, "No memory for PMD internal items.");
2122 i40e_pattern_skip_void_item(items, pattern);
2124 /* Find if there's matched parse filter function */
2125 parse_filter = i40e_find_parse_filter_func(items);
2126 if (!parse_filter) {
2127 rte_flow_error_set(error, EINVAL,
2128 RTE_FLOW_ERROR_TYPE_ITEM,
2129 pattern, "Unsupported pattern");
2133 ret = parse_filter(dev, attr, items, actions, error, &cons_filter);
/* rte_flow create entry point: validates the flow (which fills the
 * module-level cons_filter/cons_filter_type), programs the hardware for
 * the resulting filter type, and links the new rule into pf->flow_list.
 * flow->rule points at the SW list entry just appended (TAILQ_LAST).
 * NOTE(review): listing elides some original lines (error gotos, free of
 * the flow on failure); comments cover the visible logic only.
 */
2140 static struct rte_flow *
2141 i40e_flow_create(struct rte_eth_dev *dev,
2142 const struct rte_flow_attr *attr,
2143 const struct rte_flow_item pattern[],
2144 const struct rte_flow_action actions[],
2145 struct rte_flow_error *error)
2147 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2148 struct rte_flow *flow;
2151 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
2153 rte_flow_error_set(error, ENOMEM,
2154 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2155 "Failed to allocate memory");
2159 ret = i40e_flow_validate(dev, attr, pattern, actions, error);
2163 switch (cons_filter_type) {
2164 case RTE_ETH_FILTER_ETHERTYPE:
2165 ret = i40e_ethertype_filter_set(pf,
2166 &cons_filter.ethertype_filter, 1);
2169 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
2170 i40e_ethertype_filter_list);
2172 case RTE_ETH_FILTER_FDIR:
2173 ret = i40e_add_del_fdir_filter(dev,
2174 &cons_filter.fdir_filter, 1);
2177 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
2178 i40e_fdir_filter_list);
2180 case RTE_ETH_FILTER_TUNNEL:
2181 ret = i40e_dev_consistent_tunnel_filter_set(pf,
2182 &cons_filter.consistent_tunnel_filter, 1);
2185 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
2186 i40e_tunnel_filter_list);
2192 flow->filter_type = cons_filter_type;
2193 TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
2197 rte_flow_error_set(error, -ret,
2198 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2199 "Failed to create flow.");
/* rte_flow destroy entry point: removes the HW/SW filter according to the
 * flow's recorded filter type, then unlinks the flow from pf->flow_list.
 * NOTE(review): listing elides some original lines (breaks, frees, the
 * unsupported-type ret assignment); comments cover the visible logic only.
 */
2205 i40e_flow_destroy(struct rte_eth_dev *dev,
2206 struct rte_flow *flow,
2207 struct rte_flow_error *error)
2209 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2210 enum rte_filter_type filter_type = flow->filter_type;
2213 switch (filter_type) {
2214 case RTE_ETH_FILTER_ETHERTYPE:
2215 ret = i40e_flow_destroy_ethertype_filter(pf,
2216 (struct i40e_ethertype_filter *)flow->rule);
2218 case RTE_ETH_FILTER_TUNNEL:
2219 ret = i40e_flow_destroy_tunnel_filter(pf,
2220 (struct i40e_tunnel_filter *)flow->rule);
2222 case RTE_ETH_FILTER_FDIR:
/* FDIR reuses the add/del helper with add == 0 to delete. */
2223 ret = i40e_add_del_fdir_filter(dev,
2224 &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
2227 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
2234 TAILQ_REMOVE(&pf->flow_list, flow, node);
2237 rte_flow_error_set(error, -ret,
2238 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2239 "Failed to destroy flow.");
/* Removes one ethertype filter: rebuilds the AQ control-packet flags from
 * the stored filter, issues the remove via the admin queue (add/remove
 * selected by the 0 argument), then deletes the matching SW list node.
 * NOTE(review): listing elides some original lines (flag declaration,
 * error returns); comments cover the visible logic only.
 */
2245 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
2246 struct i40e_ethertype_filter *filter)
2248 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2249 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
2250 struct i40e_ethertype_filter *node;
2251 struct i40e_control_filter_stats stats;
2255 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
2256 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
2257 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
2258 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
2259 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
2261 memset(&stats, 0, sizeof(stats));
2262 ret = i40e_aq_add_rem_control_packet_filter(hw,
2263 filter->input.mac_addr.addr_bytes,
2264 filter->input.ether_type,
2265 flags, pf->main_vsi->seid,
2266 filter->queue, 0, &stats, NULL);
/* HW removal done; now drop the shadow entry from the SW list. */
2270 node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
2274 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
/* Removes one tunnel (cloud) filter: rebuilds the AQ cloud-filter element
 * from the stored filter, picks the target VSI (PF main VSI or the VF's
 * VSI), uses the big-buffer AQ variant for MPLSoUDP/MPLSoGRE/QinQ filters,
 * then deletes the matching SW list node.
 * NOTE(review): listing elides some original lines (vsi selection for the
 * PF case, error returns); comments cover the visible logic only.
 */
2280 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
2281 struct i40e_tunnel_filter *filter)
2283 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2284 struct i40e_vsi *vsi;
2285 struct i40e_pf_vf *vf;
2286 struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
2287 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
2288 struct i40e_tunnel_filter *node;
2289 bool big_buffer = 0;
2292 memset(&cld_filter, 0, sizeof(cld_filter));
2293 ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
2294 (struct ether_addr *)&cld_filter.element.outer_mac);
2295 ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
2296 (struct ether_addr *)&cld_filter.element.inner_mac);
2297 cld_filter.element.inner_vlan = filter->input.inner_vlan;
2298 cld_filter.element.flags = filter->input.flags;
2299 cld_filter.element.tenant_id = filter->input.tenant_id;
2300 cld_filter.element.queue_number = filter->queue;
2301 rte_memcpy(cld_filter.general_fields,
2302 filter->input.general_fields,
2303 sizeof(cld_filter.general_fields));
2305 if (!filter->is_to_vf)
2308 vf = &pf->vfs[filter->vf_id];
/* Big-buffer AQ command is required for the extended filter types. */
2312 if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ==
2313 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoUDP) ||
2314 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ==
2315 I40E_AQC_ADD_CLOUD_FILTER_TEID_MPLSoGRE) ||
2316 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ) ==
2317 I40E_AQC_ADD_CLOUD_FILTER_CUSTOM_QINQ))
2321 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
2324 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
2325 &cld_filter.element, 1);
/* HW removal done; now drop the shadow entry from the SW list. */
2329 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
2333 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
/* rte_flow flush entry point: flushes FDIR, then ethertype, then tunnel
 * filters, reporting the first failure via rte_flow_error_set.
 * NOTE(review): listing elides the returns after each error; comments
 * cover the visible logic only.
 */
2339 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
2341 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2344 ret = i40e_flow_flush_fdir_filter(pf);
2346 rte_flow_error_set(error, -ret,
2347 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2348 "Failed to flush FDIR flows.");
2352 ret = i40e_flow_flush_ethertype_filter(pf);
2354 rte_flow_error_set(error, -ret,
2355 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2356 "Failed to ethertype flush flows.");
2360 ret = i40e_flow_flush_tunnel_filter(pf);
2362 rte_flow_error_set(error, -ret,
2363 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2364 "Failed to flush tunnel flows.");
/* Flushes all FDIR filters: clears the HW table via i40e_fdir_flush, then
 * empties the SW FDIR list, then removes every FDIR-typed flow from
 * pf->flow_list (safe iteration while removing).
 * NOTE(review): listing elides some original lines (rte_free of flows,
 * return); comments cover the visible logic only.
 */
2372 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
2374 struct rte_eth_dev *dev = pf->adapter->eth_dev;
2375 struct i40e_fdir_info *fdir_info = &pf->fdir;
2376 struct i40e_fdir_filter *fdir_filter;
2377 struct rte_flow *flow;
2381 ret = i40e_fdir_flush(dev);
2383 /* Delete FDIR filters in FDIR list. */
2384 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
2385 ret = i40e_sw_fdir_filter_del(pf,
2386 &fdir_filter->fdir.input);
2391 /* Delete FDIR flows in flow list. */
2392 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2393 if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
2394 TAILQ_REMOVE(&pf->flow_list, flow, node);
2403 /* Flush all ethertype filters */
/* Destroys every ethertype filter in the SW list (each call removes HW
 * and SW state), then removes ethertype-typed flows from pf->flow_list.
 * NOTE(review): listing elides some original lines (error break, frees,
 * return); comments cover the visible logic only.
 */
2405 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
2407 struct i40e_ethertype_filter_list
2408 *ethertype_list = &pf->ethertype.ethertype_list;
2409 struct i40e_ethertype_filter *filter;
2410 struct rte_flow *flow;
2414 while ((filter = TAILQ_FIRST(ethertype_list))) {
2415 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
2420 /* Delete ethertype flows in flow list. */
2421 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2422 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
2423 TAILQ_REMOVE(&pf->flow_list, flow, node);
2431 /* Flush all tunnel filters */
2433 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
2435 struct i40e_tunnel_filter_list
2436 *tunnel_list = &pf->tunnel.tunnel_list;
2437 struct i40e_tunnel_filter *filter;
2438 struct rte_flow *flow;
2442 while ((filter = TAILQ_FIRST(tunnel_list))) {
2443 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
2448 /* Delete tunnel flows in flow list. */
2449 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
2450 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
2451 TAILQ_REMOVE(&pf->flow_list, flow, node);