1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev.h>
16 #include <rte_malloc.h>
17 #include <rte_eth_ctrl.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
21 #include "i40e_logs.h"
22 #include "base/i40e_type.h"
23 #include "base/i40e_prototype.h"
24 #include "i40e_ethdev.h"
/* Mask covering the 8-bit IPv6 Traffic Class field at its bit offset
 * inside the flow director programming word.
 */
#define I40E_IPV6_TC_MASK	(0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 next-header value for the Fragment extension header (RFC 8200) */
#define I40E_IPV6_FRAG_HEADER	44
/* Number of tenant-id bytes groups carried in tunnel filter config */
#define I40E_TENANT_ARRAY_NUM	3
/* Full 16-bit VLAN TCI match mask (PCP + DEI + VID) */
#define I40E_TCI_MASK		0xFFFF
31 static int i40e_flow_validate(struct rte_eth_dev *dev,
32 const struct rte_flow_attr *attr,
33 const struct rte_flow_item pattern[],
34 const struct rte_flow_action actions[],
35 struct rte_flow_error *error);
36 static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
37 const struct rte_flow_attr *attr,
38 const struct rte_flow_item pattern[],
39 const struct rte_flow_action actions[],
40 struct rte_flow_error *error);
41 static int i40e_flow_destroy(struct rte_eth_dev *dev,
42 struct rte_flow *flow,
43 struct rte_flow_error *error);
44 static int i40e_flow_flush(struct rte_eth_dev *dev,
45 struct rte_flow_error *error);
47 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
48 const struct rte_flow_item *pattern,
49 struct rte_flow_error *error,
50 struct rte_eth_ethertype_filter *filter);
51 static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
52 const struct rte_flow_action *actions,
53 struct rte_flow_error *error,
54 struct rte_eth_ethertype_filter *filter);
55 static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
56 const struct rte_flow_item *pattern,
57 struct rte_flow_error *error,
58 struct i40e_fdir_filter_conf *filter);
59 static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
60 const struct rte_flow_action *actions,
61 struct rte_flow_error *error,
62 struct i40e_fdir_filter_conf *filter);
63 static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
64 const struct rte_flow_action *actions,
65 struct rte_flow_error *error,
66 struct i40e_tunnel_filter_conf *filter);
67 static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
68 struct rte_flow_error *error);
69 static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
70 const struct rte_flow_attr *attr,
71 const struct rte_flow_item pattern[],
72 const struct rte_flow_action actions[],
73 struct rte_flow_error *error,
74 union i40e_filter_t *filter);
75 static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
76 const struct rte_flow_attr *attr,
77 const struct rte_flow_item pattern[],
78 const struct rte_flow_action actions[],
79 struct rte_flow_error *error,
80 union i40e_filter_t *filter);
81 static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
82 const struct rte_flow_attr *attr,
83 const struct rte_flow_item pattern[],
84 const struct rte_flow_action actions[],
85 struct rte_flow_error *error,
86 union i40e_filter_t *filter);
87 static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
88 const struct rte_flow_attr *attr,
89 const struct rte_flow_item pattern[],
90 const struct rte_flow_action actions[],
91 struct rte_flow_error *error,
92 union i40e_filter_t *filter);
93 static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
94 const struct rte_flow_attr *attr,
95 const struct rte_flow_item pattern[],
96 const struct rte_flow_action actions[],
97 struct rte_flow_error *error,
98 union i40e_filter_t *filter);
99 static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
100 const struct rte_flow_attr *attr,
101 const struct rte_flow_item pattern[],
102 const struct rte_flow_action actions[],
103 struct rte_flow_error *error,
104 union i40e_filter_t *filter);
105 static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
106 struct i40e_ethertype_filter *filter);
107 static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
108 struct i40e_tunnel_filter *filter);
109 static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
110 static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
111 static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
113 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
114 const struct rte_flow_attr *attr,
115 const struct rte_flow_item pattern[],
116 const struct rte_flow_action actions[],
117 struct rte_flow_error *error,
118 union i40e_filter_t *filter);
120 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
121 const struct rte_flow_item *pattern,
122 struct rte_flow_error *error,
123 struct i40e_tunnel_filter_conf *filter);
125 const struct rte_flow_ops i40e_flow_ops = {
126 .validate = i40e_flow_validate,
127 .create = i40e_flow_create,
128 .destroy = i40e_flow_destroy,
129 .flush = i40e_flow_flush,
132 union i40e_filter_t cons_filter;
133 enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
135 /* Pattern matched ethertype filter */
136 static enum rte_flow_item_type pattern_ethertype[] = {
137 RTE_FLOW_ITEM_TYPE_ETH,
138 RTE_FLOW_ITEM_TYPE_END,
141 /* Pattern matched flow director filter */
142 static enum rte_flow_item_type pattern_fdir_ipv4[] = {
143 RTE_FLOW_ITEM_TYPE_ETH,
144 RTE_FLOW_ITEM_TYPE_IPV4,
145 RTE_FLOW_ITEM_TYPE_END,
148 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
149 RTE_FLOW_ITEM_TYPE_ETH,
150 RTE_FLOW_ITEM_TYPE_IPV4,
151 RTE_FLOW_ITEM_TYPE_UDP,
152 RTE_FLOW_ITEM_TYPE_END,
155 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
156 RTE_FLOW_ITEM_TYPE_ETH,
157 RTE_FLOW_ITEM_TYPE_IPV4,
158 RTE_FLOW_ITEM_TYPE_TCP,
159 RTE_FLOW_ITEM_TYPE_END,
162 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
163 RTE_FLOW_ITEM_TYPE_ETH,
164 RTE_FLOW_ITEM_TYPE_IPV4,
165 RTE_FLOW_ITEM_TYPE_SCTP,
166 RTE_FLOW_ITEM_TYPE_END,
169 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
170 RTE_FLOW_ITEM_TYPE_ETH,
171 RTE_FLOW_ITEM_TYPE_IPV4,
172 RTE_FLOW_ITEM_TYPE_UDP,
173 RTE_FLOW_ITEM_TYPE_GTPC,
174 RTE_FLOW_ITEM_TYPE_END,
177 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
178 RTE_FLOW_ITEM_TYPE_ETH,
179 RTE_FLOW_ITEM_TYPE_IPV4,
180 RTE_FLOW_ITEM_TYPE_UDP,
181 RTE_FLOW_ITEM_TYPE_GTPU,
182 RTE_FLOW_ITEM_TYPE_END,
185 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
186 RTE_FLOW_ITEM_TYPE_ETH,
187 RTE_FLOW_ITEM_TYPE_IPV4,
188 RTE_FLOW_ITEM_TYPE_UDP,
189 RTE_FLOW_ITEM_TYPE_GTPU,
190 RTE_FLOW_ITEM_TYPE_IPV4,
191 RTE_FLOW_ITEM_TYPE_END,
194 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
195 RTE_FLOW_ITEM_TYPE_ETH,
196 RTE_FLOW_ITEM_TYPE_IPV4,
197 RTE_FLOW_ITEM_TYPE_UDP,
198 RTE_FLOW_ITEM_TYPE_GTPU,
199 RTE_FLOW_ITEM_TYPE_IPV6,
200 RTE_FLOW_ITEM_TYPE_END,
203 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
204 RTE_FLOW_ITEM_TYPE_ETH,
205 RTE_FLOW_ITEM_TYPE_IPV6,
206 RTE_FLOW_ITEM_TYPE_END,
209 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
210 RTE_FLOW_ITEM_TYPE_ETH,
211 RTE_FLOW_ITEM_TYPE_IPV6,
212 RTE_FLOW_ITEM_TYPE_UDP,
213 RTE_FLOW_ITEM_TYPE_END,
216 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
217 RTE_FLOW_ITEM_TYPE_ETH,
218 RTE_FLOW_ITEM_TYPE_IPV6,
219 RTE_FLOW_ITEM_TYPE_TCP,
220 RTE_FLOW_ITEM_TYPE_END,
223 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
224 RTE_FLOW_ITEM_TYPE_ETH,
225 RTE_FLOW_ITEM_TYPE_IPV6,
226 RTE_FLOW_ITEM_TYPE_SCTP,
227 RTE_FLOW_ITEM_TYPE_END,
230 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
231 RTE_FLOW_ITEM_TYPE_ETH,
232 RTE_FLOW_ITEM_TYPE_IPV6,
233 RTE_FLOW_ITEM_TYPE_UDP,
234 RTE_FLOW_ITEM_TYPE_GTPC,
235 RTE_FLOW_ITEM_TYPE_END,
238 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
239 RTE_FLOW_ITEM_TYPE_ETH,
240 RTE_FLOW_ITEM_TYPE_IPV6,
241 RTE_FLOW_ITEM_TYPE_UDP,
242 RTE_FLOW_ITEM_TYPE_GTPU,
243 RTE_FLOW_ITEM_TYPE_END,
246 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
247 RTE_FLOW_ITEM_TYPE_ETH,
248 RTE_FLOW_ITEM_TYPE_IPV6,
249 RTE_FLOW_ITEM_TYPE_UDP,
250 RTE_FLOW_ITEM_TYPE_GTPU,
251 RTE_FLOW_ITEM_TYPE_IPV4,
252 RTE_FLOW_ITEM_TYPE_END,
255 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
256 RTE_FLOW_ITEM_TYPE_ETH,
257 RTE_FLOW_ITEM_TYPE_IPV6,
258 RTE_FLOW_ITEM_TYPE_UDP,
259 RTE_FLOW_ITEM_TYPE_GTPU,
260 RTE_FLOW_ITEM_TYPE_IPV6,
261 RTE_FLOW_ITEM_TYPE_END,
264 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
265 RTE_FLOW_ITEM_TYPE_ETH,
266 RTE_FLOW_ITEM_TYPE_RAW,
267 RTE_FLOW_ITEM_TYPE_END,
270 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
271 RTE_FLOW_ITEM_TYPE_ETH,
272 RTE_FLOW_ITEM_TYPE_RAW,
273 RTE_FLOW_ITEM_TYPE_RAW,
274 RTE_FLOW_ITEM_TYPE_END,
277 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
278 RTE_FLOW_ITEM_TYPE_ETH,
279 RTE_FLOW_ITEM_TYPE_RAW,
280 RTE_FLOW_ITEM_TYPE_RAW,
281 RTE_FLOW_ITEM_TYPE_RAW,
282 RTE_FLOW_ITEM_TYPE_END,
285 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
286 RTE_FLOW_ITEM_TYPE_ETH,
287 RTE_FLOW_ITEM_TYPE_IPV4,
288 RTE_FLOW_ITEM_TYPE_RAW,
289 RTE_FLOW_ITEM_TYPE_END,
292 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
293 RTE_FLOW_ITEM_TYPE_ETH,
294 RTE_FLOW_ITEM_TYPE_IPV4,
295 RTE_FLOW_ITEM_TYPE_RAW,
296 RTE_FLOW_ITEM_TYPE_RAW,
297 RTE_FLOW_ITEM_TYPE_END,
300 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
301 RTE_FLOW_ITEM_TYPE_ETH,
302 RTE_FLOW_ITEM_TYPE_IPV4,
303 RTE_FLOW_ITEM_TYPE_RAW,
304 RTE_FLOW_ITEM_TYPE_RAW,
305 RTE_FLOW_ITEM_TYPE_RAW,
306 RTE_FLOW_ITEM_TYPE_END,
309 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
310 RTE_FLOW_ITEM_TYPE_ETH,
311 RTE_FLOW_ITEM_TYPE_IPV4,
312 RTE_FLOW_ITEM_TYPE_UDP,
313 RTE_FLOW_ITEM_TYPE_RAW,
314 RTE_FLOW_ITEM_TYPE_END,
317 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
318 RTE_FLOW_ITEM_TYPE_ETH,
319 RTE_FLOW_ITEM_TYPE_IPV4,
320 RTE_FLOW_ITEM_TYPE_UDP,
321 RTE_FLOW_ITEM_TYPE_RAW,
322 RTE_FLOW_ITEM_TYPE_RAW,
323 RTE_FLOW_ITEM_TYPE_END,
326 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
327 RTE_FLOW_ITEM_TYPE_ETH,
328 RTE_FLOW_ITEM_TYPE_IPV4,
329 RTE_FLOW_ITEM_TYPE_UDP,
330 RTE_FLOW_ITEM_TYPE_RAW,
331 RTE_FLOW_ITEM_TYPE_RAW,
332 RTE_FLOW_ITEM_TYPE_RAW,
333 RTE_FLOW_ITEM_TYPE_END,
336 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
337 RTE_FLOW_ITEM_TYPE_ETH,
338 RTE_FLOW_ITEM_TYPE_IPV4,
339 RTE_FLOW_ITEM_TYPE_TCP,
340 RTE_FLOW_ITEM_TYPE_RAW,
341 RTE_FLOW_ITEM_TYPE_END,
344 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
345 RTE_FLOW_ITEM_TYPE_ETH,
346 RTE_FLOW_ITEM_TYPE_IPV4,
347 RTE_FLOW_ITEM_TYPE_TCP,
348 RTE_FLOW_ITEM_TYPE_RAW,
349 RTE_FLOW_ITEM_TYPE_RAW,
350 RTE_FLOW_ITEM_TYPE_END,
353 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
354 RTE_FLOW_ITEM_TYPE_ETH,
355 RTE_FLOW_ITEM_TYPE_IPV4,
356 RTE_FLOW_ITEM_TYPE_TCP,
357 RTE_FLOW_ITEM_TYPE_RAW,
358 RTE_FLOW_ITEM_TYPE_RAW,
359 RTE_FLOW_ITEM_TYPE_RAW,
360 RTE_FLOW_ITEM_TYPE_END,
363 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
364 RTE_FLOW_ITEM_TYPE_ETH,
365 RTE_FLOW_ITEM_TYPE_IPV4,
366 RTE_FLOW_ITEM_TYPE_SCTP,
367 RTE_FLOW_ITEM_TYPE_RAW,
368 RTE_FLOW_ITEM_TYPE_END,
371 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
372 RTE_FLOW_ITEM_TYPE_ETH,
373 RTE_FLOW_ITEM_TYPE_IPV4,
374 RTE_FLOW_ITEM_TYPE_SCTP,
375 RTE_FLOW_ITEM_TYPE_RAW,
376 RTE_FLOW_ITEM_TYPE_RAW,
377 RTE_FLOW_ITEM_TYPE_END,
380 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
381 RTE_FLOW_ITEM_TYPE_ETH,
382 RTE_FLOW_ITEM_TYPE_IPV4,
383 RTE_FLOW_ITEM_TYPE_SCTP,
384 RTE_FLOW_ITEM_TYPE_RAW,
385 RTE_FLOW_ITEM_TYPE_RAW,
386 RTE_FLOW_ITEM_TYPE_RAW,
387 RTE_FLOW_ITEM_TYPE_END,
390 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
391 RTE_FLOW_ITEM_TYPE_ETH,
392 RTE_FLOW_ITEM_TYPE_IPV6,
393 RTE_FLOW_ITEM_TYPE_RAW,
394 RTE_FLOW_ITEM_TYPE_END,
397 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
398 RTE_FLOW_ITEM_TYPE_ETH,
399 RTE_FLOW_ITEM_TYPE_IPV6,
400 RTE_FLOW_ITEM_TYPE_RAW,
401 RTE_FLOW_ITEM_TYPE_RAW,
402 RTE_FLOW_ITEM_TYPE_END,
405 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
406 RTE_FLOW_ITEM_TYPE_ETH,
407 RTE_FLOW_ITEM_TYPE_IPV6,
408 RTE_FLOW_ITEM_TYPE_RAW,
409 RTE_FLOW_ITEM_TYPE_RAW,
410 RTE_FLOW_ITEM_TYPE_RAW,
411 RTE_FLOW_ITEM_TYPE_END,
414 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
415 RTE_FLOW_ITEM_TYPE_ETH,
416 RTE_FLOW_ITEM_TYPE_IPV6,
417 RTE_FLOW_ITEM_TYPE_UDP,
418 RTE_FLOW_ITEM_TYPE_RAW,
419 RTE_FLOW_ITEM_TYPE_END,
422 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
423 RTE_FLOW_ITEM_TYPE_ETH,
424 RTE_FLOW_ITEM_TYPE_IPV6,
425 RTE_FLOW_ITEM_TYPE_UDP,
426 RTE_FLOW_ITEM_TYPE_RAW,
427 RTE_FLOW_ITEM_TYPE_RAW,
428 RTE_FLOW_ITEM_TYPE_END,
431 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
432 RTE_FLOW_ITEM_TYPE_ETH,
433 RTE_FLOW_ITEM_TYPE_IPV6,
434 RTE_FLOW_ITEM_TYPE_UDP,
435 RTE_FLOW_ITEM_TYPE_RAW,
436 RTE_FLOW_ITEM_TYPE_RAW,
437 RTE_FLOW_ITEM_TYPE_RAW,
438 RTE_FLOW_ITEM_TYPE_END,
441 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
442 RTE_FLOW_ITEM_TYPE_ETH,
443 RTE_FLOW_ITEM_TYPE_IPV6,
444 RTE_FLOW_ITEM_TYPE_TCP,
445 RTE_FLOW_ITEM_TYPE_RAW,
446 RTE_FLOW_ITEM_TYPE_END,
449 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
450 RTE_FLOW_ITEM_TYPE_ETH,
451 RTE_FLOW_ITEM_TYPE_IPV6,
452 RTE_FLOW_ITEM_TYPE_TCP,
453 RTE_FLOW_ITEM_TYPE_RAW,
454 RTE_FLOW_ITEM_TYPE_RAW,
455 RTE_FLOW_ITEM_TYPE_END,
458 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
459 RTE_FLOW_ITEM_TYPE_ETH,
460 RTE_FLOW_ITEM_TYPE_IPV6,
461 RTE_FLOW_ITEM_TYPE_TCP,
462 RTE_FLOW_ITEM_TYPE_RAW,
463 RTE_FLOW_ITEM_TYPE_RAW,
464 RTE_FLOW_ITEM_TYPE_RAW,
465 RTE_FLOW_ITEM_TYPE_END,
468 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
469 RTE_FLOW_ITEM_TYPE_ETH,
470 RTE_FLOW_ITEM_TYPE_IPV6,
471 RTE_FLOW_ITEM_TYPE_SCTP,
472 RTE_FLOW_ITEM_TYPE_RAW,
473 RTE_FLOW_ITEM_TYPE_END,
476 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
477 RTE_FLOW_ITEM_TYPE_ETH,
478 RTE_FLOW_ITEM_TYPE_IPV6,
479 RTE_FLOW_ITEM_TYPE_SCTP,
480 RTE_FLOW_ITEM_TYPE_RAW,
481 RTE_FLOW_ITEM_TYPE_RAW,
482 RTE_FLOW_ITEM_TYPE_END,
485 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
486 RTE_FLOW_ITEM_TYPE_ETH,
487 RTE_FLOW_ITEM_TYPE_IPV6,
488 RTE_FLOW_ITEM_TYPE_SCTP,
489 RTE_FLOW_ITEM_TYPE_RAW,
490 RTE_FLOW_ITEM_TYPE_RAW,
491 RTE_FLOW_ITEM_TYPE_RAW,
492 RTE_FLOW_ITEM_TYPE_END,
495 static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
496 RTE_FLOW_ITEM_TYPE_ETH,
497 RTE_FLOW_ITEM_TYPE_VLAN,
498 RTE_FLOW_ITEM_TYPE_END,
501 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
502 RTE_FLOW_ITEM_TYPE_ETH,
503 RTE_FLOW_ITEM_TYPE_VLAN,
504 RTE_FLOW_ITEM_TYPE_IPV4,
505 RTE_FLOW_ITEM_TYPE_END,
508 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
509 RTE_FLOW_ITEM_TYPE_ETH,
510 RTE_FLOW_ITEM_TYPE_VLAN,
511 RTE_FLOW_ITEM_TYPE_IPV4,
512 RTE_FLOW_ITEM_TYPE_UDP,
513 RTE_FLOW_ITEM_TYPE_END,
516 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
517 RTE_FLOW_ITEM_TYPE_ETH,
518 RTE_FLOW_ITEM_TYPE_VLAN,
519 RTE_FLOW_ITEM_TYPE_IPV4,
520 RTE_FLOW_ITEM_TYPE_TCP,
521 RTE_FLOW_ITEM_TYPE_END,
524 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
525 RTE_FLOW_ITEM_TYPE_ETH,
526 RTE_FLOW_ITEM_TYPE_VLAN,
527 RTE_FLOW_ITEM_TYPE_IPV4,
528 RTE_FLOW_ITEM_TYPE_SCTP,
529 RTE_FLOW_ITEM_TYPE_END,
532 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
533 RTE_FLOW_ITEM_TYPE_ETH,
534 RTE_FLOW_ITEM_TYPE_VLAN,
535 RTE_FLOW_ITEM_TYPE_IPV6,
536 RTE_FLOW_ITEM_TYPE_END,
539 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
540 RTE_FLOW_ITEM_TYPE_ETH,
541 RTE_FLOW_ITEM_TYPE_VLAN,
542 RTE_FLOW_ITEM_TYPE_IPV6,
543 RTE_FLOW_ITEM_TYPE_UDP,
544 RTE_FLOW_ITEM_TYPE_END,
547 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
548 RTE_FLOW_ITEM_TYPE_ETH,
549 RTE_FLOW_ITEM_TYPE_VLAN,
550 RTE_FLOW_ITEM_TYPE_IPV6,
551 RTE_FLOW_ITEM_TYPE_TCP,
552 RTE_FLOW_ITEM_TYPE_END,
555 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
556 RTE_FLOW_ITEM_TYPE_ETH,
557 RTE_FLOW_ITEM_TYPE_VLAN,
558 RTE_FLOW_ITEM_TYPE_IPV6,
559 RTE_FLOW_ITEM_TYPE_SCTP,
560 RTE_FLOW_ITEM_TYPE_END,
563 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
564 RTE_FLOW_ITEM_TYPE_ETH,
565 RTE_FLOW_ITEM_TYPE_VLAN,
566 RTE_FLOW_ITEM_TYPE_RAW,
567 RTE_FLOW_ITEM_TYPE_END,
570 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
571 RTE_FLOW_ITEM_TYPE_ETH,
572 RTE_FLOW_ITEM_TYPE_VLAN,
573 RTE_FLOW_ITEM_TYPE_RAW,
574 RTE_FLOW_ITEM_TYPE_RAW,
575 RTE_FLOW_ITEM_TYPE_END,
578 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
579 RTE_FLOW_ITEM_TYPE_ETH,
580 RTE_FLOW_ITEM_TYPE_VLAN,
581 RTE_FLOW_ITEM_TYPE_RAW,
582 RTE_FLOW_ITEM_TYPE_RAW,
583 RTE_FLOW_ITEM_TYPE_RAW,
584 RTE_FLOW_ITEM_TYPE_END,
587 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
588 RTE_FLOW_ITEM_TYPE_ETH,
589 RTE_FLOW_ITEM_TYPE_VLAN,
590 RTE_FLOW_ITEM_TYPE_IPV4,
591 RTE_FLOW_ITEM_TYPE_RAW,
592 RTE_FLOW_ITEM_TYPE_END,
595 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
596 RTE_FLOW_ITEM_TYPE_ETH,
597 RTE_FLOW_ITEM_TYPE_VLAN,
598 RTE_FLOW_ITEM_TYPE_IPV4,
599 RTE_FLOW_ITEM_TYPE_RAW,
600 RTE_FLOW_ITEM_TYPE_RAW,
601 RTE_FLOW_ITEM_TYPE_END,
604 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
605 RTE_FLOW_ITEM_TYPE_ETH,
606 RTE_FLOW_ITEM_TYPE_VLAN,
607 RTE_FLOW_ITEM_TYPE_IPV4,
608 RTE_FLOW_ITEM_TYPE_RAW,
609 RTE_FLOW_ITEM_TYPE_RAW,
610 RTE_FLOW_ITEM_TYPE_RAW,
611 RTE_FLOW_ITEM_TYPE_END,
614 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
615 RTE_FLOW_ITEM_TYPE_ETH,
616 RTE_FLOW_ITEM_TYPE_VLAN,
617 RTE_FLOW_ITEM_TYPE_IPV4,
618 RTE_FLOW_ITEM_TYPE_UDP,
619 RTE_FLOW_ITEM_TYPE_RAW,
620 RTE_FLOW_ITEM_TYPE_END,
623 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
624 RTE_FLOW_ITEM_TYPE_ETH,
625 RTE_FLOW_ITEM_TYPE_VLAN,
626 RTE_FLOW_ITEM_TYPE_IPV4,
627 RTE_FLOW_ITEM_TYPE_UDP,
628 RTE_FLOW_ITEM_TYPE_RAW,
629 RTE_FLOW_ITEM_TYPE_RAW,
630 RTE_FLOW_ITEM_TYPE_END,
633 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
634 RTE_FLOW_ITEM_TYPE_ETH,
635 RTE_FLOW_ITEM_TYPE_VLAN,
636 RTE_FLOW_ITEM_TYPE_IPV4,
637 RTE_FLOW_ITEM_TYPE_UDP,
638 RTE_FLOW_ITEM_TYPE_RAW,
639 RTE_FLOW_ITEM_TYPE_RAW,
640 RTE_FLOW_ITEM_TYPE_RAW,
641 RTE_FLOW_ITEM_TYPE_END,
644 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
645 RTE_FLOW_ITEM_TYPE_ETH,
646 RTE_FLOW_ITEM_TYPE_VLAN,
647 RTE_FLOW_ITEM_TYPE_IPV4,
648 RTE_FLOW_ITEM_TYPE_TCP,
649 RTE_FLOW_ITEM_TYPE_RAW,
650 RTE_FLOW_ITEM_TYPE_END,
653 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
654 RTE_FLOW_ITEM_TYPE_ETH,
655 RTE_FLOW_ITEM_TYPE_VLAN,
656 RTE_FLOW_ITEM_TYPE_IPV4,
657 RTE_FLOW_ITEM_TYPE_TCP,
658 RTE_FLOW_ITEM_TYPE_RAW,
659 RTE_FLOW_ITEM_TYPE_RAW,
660 RTE_FLOW_ITEM_TYPE_END,
663 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
664 RTE_FLOW_ITEM_TYPE_ETH,
665 RTE_FLOW_ITEM_TYPE_VLAN,
666 RTE_FLOW_ITEM_TYPE_IPV4,
667 RTE_FLOW_ITEM_TYPE_TCP,
668 RTE_FLOW_ITEM_TYPE_RAW,
669 RTE_FLOW_ITEM_TYPE_RAW,
670 RTE_FLOW_ITEM_TYPE_RAW,
671 RTE_FLOW_ITEM_TYPE_END,
674 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
675 RTE_FLOW_ITEM_TYPE_ETH,
676 RTE_FLOW_ITEM_TYPE_VLAN,
677 RTE_FLOW_ITEM_TYPE_IPV4,
678 RTE_FLOW_ITEM_TYPE_SCTP,
679 RTE_FLOW_ITEM_TYPE_RAW,
680 RTE_FLOW_ITEM_TYPE_END,
683 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
684 RTE_FLOW_ITEM_TYPE_ETH,
685 RTE_FLOW_ITEM_TYPE_VLAN,
686 RTE_FLOW_ITEM_TYPE_IPV4,
687 RTE_FLOW_ITEM_TYPE_SCTP,
688 RTE_FLOW_ITEM_TYPE_RAW,
689 RTE_FLOW_ITEM_TYPE_RAW,
690 RTE_FLOW_ITEM_TYPE_END,
693 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
694 RTE_FLOW_ITEM_TYPE_ETH,
695 RTE_FLOW_ITEM_TYPE_VLAN,
696 RTE_FLOW_ITEM_TYPE_IPV4,
697 RTE_FLOW_ITEM_TYPE_SCTP,
698 RTE_FLOW_ITEM_TYPE_RAW,
699 RTE_FLOW_ITEM_TYPE_RAW,
700 RTE_FLOW_ITEM_TYPE_RAW,
701 RTE_FLOW_ITEM_TYPE_END,
704 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
705 RTE_FLOW_ITEM_TYPE_ETH,
706 RTE_FLOW_ITEM_TYPE_VLAN,
707 RTE_FLOW_ITEM_TYPE_IPV6,
708 RTE_FLOW_ITEM_TYPE_RAW,
709 RTE_FLOW_ITEM_TYPE_END,
712 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
713 RTE_FLOW_ITEM_TYPE_ETH,
714 RTE_FLOW_ITEM_TYPE_VLAN,
715 RTE_FLOW_ITEM_TYPE_IPV6,
716 RTE_FLOW_ITEM_TYPE_RAW,
717 RTE_FLOW_ITEM_TYPE_RAW,
718 RTE_FLOW_ITEM_TYPE_END,
721 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
722 RTE_FLOW_ITEM_TYPE_ETH,
723 RTE_FLOW_ITEM_TYPE_VLAN,
724 RTE_FLOW_ITEM_TYPE_IPV6,
725 RTE_FLOW_ITEM_TYPE_RAW,
726 RTE_FLOW_ITEM_TYPE_RAW,
727 RTE_FLOW_ITEM_TYPE_RAW,
728 RTE_FLOW_ITEM_TYPE_END,
731 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
732 RTE_FLOW_ITEM_TYPE_ETH,
733 RTE_FLOW_ITEM_TYPE_VLAN,
734 RTE_FLOW_ITEM_TYPE_IPV6,
735 RTE_FLOW_ITEM_TYPE_UDP,
736 RTE_FLOW_ITEM_TYPE_RAW,
737 RTE_FLOW_ITEM_TYPE_END,
740 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
741 RTE_FLOW_ITEM_TYPE_ETH,
742 RTE_FLOW_ITEM_TYPE_VLAN,
743 RTE_FLOW_ITEM_TYPE_IPV6,
744 RTE_FLOW_ITEM_TYPE_UDP,
745 RTE_FLOW_ITEM_TYPE_RAW,
746 RTE_FLOW_ITEM_TYPE_RAW,
747 RTE_FLOW_ITEM_TYPE_END,
750 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
751 RTE_FLOW_ITEM_TYPE_ETH,
752 RTE_FLOW_ITEM_TYPE_VLAN,
753 RTE_FLOW_ITEM_TYPE_IPV6,
754 RTE_FLOW_ITEM_TYPE_UDP,
755 RTE_FLOW_ITEM_TYPE_RAW,
756 RTE_FLOW_ITEM_TYPE_RAW,
757 RTE_FLOW_ITEM_TYPE_RAW,
758 RTE_FLOW_ITEM_TYPE_END,
761 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
762 RTE_FLOW_ITEM_TYPE_ETH,
763 RTE_FLOW_ITEM_TYPE_VLAN,
764 RTE_FLOW_ITEM_TYPE_IPV6,
765 RTE_FLOW_ITEM_TYPE_TCP,
766 RTE_FLOW_ITEM_TYPE_RAW,
767 RTE_FLOW_ITEM_TYPE_END,
770 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
771 RTE_FLOW_ITEM_TYPE_ETH,
772 RTE_FLOW_ITEM_TYPE_VLAN,
773 RTE_FLOW_ITEM_TYPE_IPV6,
774 RTE_FLOW_ITEM_TYPE_TCP,
775 RTE_FLOW_ITEM_TYPE_RAW,
776 RTE_FLOW_ITEM_TYPE_RAW,
777 RTE_FLOW_ITEM_TYPE_END,
780 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
781 RTE_FLOW_ITEM_TYPE_ETH,
782 RTE_FLOW_ITEM_TYPE_VLAN,
783 RTE_FLOW_ITEM_TYPE_IPV6,
784 RTE_FLOW_ITEM_TYPE_TCP,
785 RTE_FLOW_ITEM_TYPE_RAW,
786 RTE_FLOW_ITEM_TYPE_RAW,
787 RTE_FLOW_ITEM_TYPE_RAW,
788 RTE_FLOW_ITEM_TYPE_END,
791 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
792 RTE_FLOW_ITEM_TYPE_ETH,
793 RTE_FLOW_ITEM_TYPE_VLAN,
794 RTE_FLOW_ITEM_TYPE_IPV6,
795 RTE_FLOW_ITEM_TYPE_SCTP,
796 RTE_FLOW_ITEM_TYPE_RAW,
797 RTE_FLOW_ITEM_TYPE_END,
800 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
801 RTE_FLOW_ITEM_TYPE_ETH,
802 RTE_FLOW_ITEM_TYPE_VLAN,
803 RTE_FLOW_ITEM_TYPE_IPV6,
804 RTE_FLOW_ITEM_TYPE_SCTP,
805 RTE_FLOW_ITEM_TYPE_RAW,
806 RTE_FLOW_ITEM_TYPE_RAW,
807 RTE_FLOW_ITEM_TYPE_END,
810 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
811 RTE_FLOW_ITEM_TYPE_ETH,
812 RTE_FLOW_ITEM_TYPE_VLAN,
813 RTE_FLOW_ITEM_TYPE_IPV6,
814 RTE_FLOW_ITEM_TYPE_SCTP,
815 RTE_FLOW_ITEM_TYPE_RAW,
816 RTE_FLOW_ITEM_TYPE_RAW,
817 RTE_FLOW_ITEM_TYPE_RAW,
818 RTE_FLOW_ITEM_TYPE_END,
821 static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
822 RTE_FLOW_ITEM_TYPE_ETH,
823 RTE_FLOW_ITEM_TYPE_IPV4,
824 RTE_FLOW_ITEM_TYPE_VF,
825 RTE_FLOW_ITEM_TYPE_END,
828 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
829 RTE_FLOW_ITEM_TYPE_ETH,
830 RTE_FLOW_ITEM_TYPE_IPV4,
831 RTE_FLOW_ITEM_TYPE_UDP,
832 RTE_FLOW_ITEM_TYPE_VF,
833 RTE_FLOW_ITEM_TYPE_END,
836 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
837 RTE_FLOW_ITEM_TYPE_ETH,
838 RTE_FLOW_ITEM_TYPE_IPV4,
839 RTE_FLOW_ITEM_TYPE_TCP,
840 RTE_FLOW_ITEM_TYPE_VF,
841 RTE_FLOW_ITEM_TYPE_END,
844 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
845 RTE_FLOW_ITEM_TYPE_ETH,
846 RTE_FLOW_ITEM_TYPE_IPV4,
847 RTE_FLOW_ITEM_TYPE_SCTP,
848 RTE_FLOW_ITEM_TYPE_VF,
849 RTE_FLOW_ITEM_TYPE_END,
852 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
853 RTE_FLOW_ITEM_TYPE_ETH,
854 RTE_FLOW_ITEM_TYPE_IPV6,
855 RTE_FLOW_ITEM_TYPE_VF,
856 RTE_FLOW_ITEM_TYPE_END,
859 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
860 RTE_FLOW_ITEM_TYPE_ETH,
861 RTE_FLOW_ITEM_TYPE_IPV6,
862 RTE_FLOW_ITEM_TYPE_UDP,
863 RTE_FLOW_ITEM_TYPE_VF,
864 RTE_FLOW_ITEM_TYPE_END,
867 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
868 RTE_FLOW_ITEM_TYPE_ETH,
869 RTE_FLOW_ITEM_TYPE_IPV6,
870 RTE_FLOW_ITEM_TYPE_TCP,
871 RTE_FLOW_ITEM_TYPE_VF,
872 RTE_FLOW_ITEM_TYPE_END,
875 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
876 RTE_FLOW_ITEM_TYPE_ETH,
877 RTE_FLOW_ITEM_TYPE_IPV6,
878 RTE_FLOW_ITEM_TYPE_SCTP,
879 RTE_FLOW_ITEM_TYPE_VF,
880 RTE_FLOW_ITEM_TYPE_END,
883 static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
884 RTE_FLOW_ITEM_TYPE_ETH,
885 RTE_FLOW_ITEM_TYPE_RAW,
886 RTE_FLOW_ITEM_TYPE_VF,
887 RTE_FLOW_ITEM_TYPE_END,
890 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
891 RTE_FLOW_ITEM_TYPE_ETH,
892 RTE_FLOW_ITEM_TYPE_RAW,
893 RTE_FLOW_ITEM_TYPE_RAW,
894 RTE_FLOW_ITEM_TYPE_VF,
895 RTE_FLOW_ITEM_TYPE_END,
898 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
899 RTE_FLOW_ITEM_TYPE_ETH,
900 RTE_FLOW_ITEM_TYPE_RAW,
901 RTE_FLOW_ITEM_TYPE_RAW,
902 RTE_FLOW_ITEM_TYPE_RAW,
903 RTE_FLOW_ITEM_TYPE_VF,
904 RTE_FLOW_ITEM_TYPE_END,
907 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
908 RTE_FLOW_ITEM_TYPE_ETH,
909 RTE_FLOW_ITEM_TYPE_IPV4,
910 RTE_FLOW_ITEM_TYPE_RAW,
911 RTE_FLOW_ITEM_TYPE_VF,
912 RTE_FLOW_ITEM_TYPE_END,
915 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
916 RTE_FLOW_ITEM_TYPE_ETH,
917 RTE_FLOW_ITEM_TYPE_IPV4,
918 RTE_FLOW_ITEM_TYPE_RAW,
919 RTE_FLOW_ITEM_TYPE_RAW,
920 RTE_FLOW_ITEM_TYPE_VF,
921 RTE_FLOW_ITEM_TYPE_END,
924 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
925 RTE_FLOW_ITEM_TYPE_ETH,
926 RTE_FLOW_ITEM_TYPE_IPV4,
927 RTE_FLOW_ITEM_TYPE_RAW,
928 RTE_FLOW_ITEM_TYPE_RAW,
929 RTE_FLOW_ITEM_TYPE_RAW,
930 RTE_FLOW_ITEM_TYPE_VF,
931 RTE_FLOW_ITEM_TYPE_END,
934 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
935 RTE_FLOW_ITEM_TYPE_ETH,
936 RTE_FLOW_ITEM_TYPE_IPV4,
937 RTE_FLOW_ITEM_TYPE_UDP,
938 RTE_FLOW_ITEM_TYPE_RAW,
939 RTE_FLOW_ITEM_TYPE_VF,
940 RTE_FLOW_ITEM_TYPE_END,
943 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
944 RTE_FLOW_ITEM_TYPE_ETH,
945 RTE_FLOW_ITEM_TYPE_IPV4,
946 RTE_FLOW_ITEM_TYPE_UDP,
947 RTE_FLOW_ITEM_TYPE_RAW,
948 RTE_FLOW_ITEM_TYPE_RAW,
949 RTE_FLOW_ITEM_TYPE_VF,
950 RTE_FLOW_ITEM_TYPE_END,
953 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
954 RTE_FLOW_ITEM_TYPE_ETH,
955 RTE_FLOW_ITEM_TYPE_IPV4,
956 RTE_FLOW_ITEM_TYPE_UDP,
957 RTE_FLOW_ITEM_TYPE_RAW,
958 RTE_FLOW_ITEM_TYPE_RAW,
959 RTE_FLOW_ITEM_TYPE_RAW,
960 RTE_FLOW_ITEM_TYPE_VF,
961 RTE_FLOW_ITEM_TYPE_END,
964 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
965 RTE_FLOW_ITEM_TYPE_ETH,
966 RTE_FLOW_ITEM_TYPE_IPV4,
967 RTE_FLOW_ITEM_TYPE_TCP,
968 RTE_FLOW_ITEM_TYPE_RAW,
969 RTE_FLOW_ITEM_TYPE_VF,
970 RTE_FLOW_ITEM_TYPE_END,
973 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
974 RTE_FLOW_ITEM_TYPE_ETH,
975 RTE_FLOW_ITEM_TYPE_IPV4,
976 RTE_FLOW_ITEM_TYPE_TCP,
977 RTE_FLOW_ITEM_TYPE_RAW,
978 RTE_FLOW_ITEM_TYPE_RAW,
979 RTE_FLOW_ITEM_TYPE_VF,
980 RTE_FLOW_ITEM_TYPE_END,
983 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
984 RTE_FLOW_ITEM_TYPE_ETH,
985 RTE_FLOW_ITEM_TYPE_IPV4,
986 RTE_FLOW_ITEM_TYPE_TCP,
987 RTE_FLOW_ITEM_TYPE_RAW,
988 RTE_FLOW_ITEM_TYPE_RAW,
989 RTE_FLOW_ITEM_TYPE_RAW,
990 RTE_FLOW_ITEM_TYPE_VF,
991 RTE_FLOW_ITEM_TYPE_END,
994 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
995 RTE_FLOW_ITEM_TYPE_ETH,
996 RTE_FLOW_ITEM_TYPE_IPV4,
997 RTE_FLOW_ITEM_TYPE_SCTP,
998 RTE_FLOW_ITEM_TYPE_RAW,
999 RTE_FLOW_ITEM_TYPE_VF,
1000 RTE_FLOW_ITEM_TYPE_END,
1003 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1004 RTE_FLOW_ITEM_TYPE_ETH,
1005 RTE_FLOW_ITEM_TYPE_IPV4,
1006 RTE_FLOW_ITEM_TYPE_SCTP,
1007 RTE_FLOW_ITEM_TYPE_RAW,
1008 RTE_FLOW_ITEM_TYPE_RAW,
1009 RTE_FLOW_ITEM_TYPE_VF,
1010 RTE_FLOW_ITEM_TYPE_END,
1013 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1014 RTE_FLOW_ITEM_TYPE_ETH,
1015 RTE_FLOW_ITEM_TYPE_IPV4,
1016 RTE_FLOW_ITEM_TYPE_SCTP,
1017 RTE_FLOW_ITEM_TYPE_RAW,
1018 RTE_FLOW_ITEM_TYPE_RAW,
1019 RTE_FLOW_ITEM_TYPE_RAW,
1020 RTE_FLOW_ITEM_TYPE_VF,
1021 RTE_FLOW_ITEM_TYPE_END,
1024 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1025 RTE_FLOW_ITEM_TYPE_ETH,
1026 RTE_FLOW_ITEM_TYPE_IPV6,
1027 RTE_FLOW_ITEM_TYPE_RAW,
1028 RTE_FLOW_ITEM_TYPE_VF,
1029 RTE_FLOW_ITEM_TYPE_END,
1032 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1033 RTE_FLOW_ITEM_TYPE_ETH,
1034 RTE_FLOW_ITEM_TYPE_IPV6,
1035 RTE_FLOW_ITEM_TYPE_RAW,
1036 RTE_FLOW_ITEM_TYPE_RAW,
1037 RTE_FLOW_ITEM_TYPE_VF,
1038 RTE_FLOW_ITEM_TYPE_END,
1041 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1042 RTE_FLOW_ITEM_TYPE_ETH,
1043 RTE_FLOW_ITEM_TYPE_IPV6,
1044 RTE_FLOW_ITEM_TYPE_RAW,
1045 RTE_FLOW_ITEM_TYPE_RAW,
1046 RTE_FLOW_ITEM_TYPE_RAW,
1047 RTE_FLOW_ITEM_TYPE_VF,
1048 RTE_FLOW_ITEM_TYPE_END,
1051 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1052 RTE_FLOW_ITEM_TYPE_ETH,
1053 RTE_FLOW_ITEM_TYPE_IPV6,
1054 RTE_FLOW_ITEM_TYPE_UDP,
1055 RTE_FLOW_ITEM_TYPE_RAW,
1056 RTE_FLOW_ITEM_TYPE_VF,
1057 RTE_FLOW_ITEM_TYPE_END,
1060 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1061 RTE_FLOW_ITEM_TYPE_ETH,
1062 RTE_FLOW_ITEM_TYPE_IPV6,
1063 RTE_FLOW_ITEM_TYPE_UDP,
1064 RTE_FLOW_ITEM_TYPE_RAW,
1065 RTE_FLOW_ITEM_TYPE_RAW,
1066 RTE_FLOW_ITEM_TYPE_VF,
1067 RTE_FLOW_ITEM_TYPE_END,
1070 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1071 RTE_FLOW_ITEM_TYPE_ETH,
1072 RTE_FLOW_ITEM_TYPE_IPV6,
1073 RTE_FLOW_ITEM_TYPE_UDP,
1074 RTE_FLOW_ITEM_TYPE_RAW,
1075 RTE_FLOW_ITEM_TYPE_RAW,
1076 RTE_FLOW_ITEM_TYPE_RAW,
1077 RTE_FLOW_ITEM_TYPE_VF,
1078 RTE_FLOW_ITEM_TYPE_END,
1081 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1082 RTE_FLOW_ITEM_TYPE_ETH,
1083 RTE_FLOW_ITEM_TYPE_IPV6,
1084 RTE_FLOW_ITEM_TYPE_TCP,
1085 RTE_FLOW_ITEM_TYPE_RAW,
1086 RTE_FLOW_ITEM_TYPE_VF,
1087 RTE_FLOW_ITEM_TYPE_END,
1090 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1091 RTE_FLOW_ITEM_TYPE_ETH,
1092 RTE_FLOW_ITEM_TYPE_IPV6,
1093 RTE_FLOW_ITEM_TYPE_TCP,
1094 RTE_FLOW_ITEM_TYPE_RAW,
1095 RTE_FLOW_ITEM_TYPE_RAW,
1096 RTE_FLOW_ITEM_TYPE_VF,
1097 RTE_FLOW_ITEM_TYPE_END,
1100 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1101 RTE_FLOW_ITEM_TYPE_ETH,
1102 RTE_FLOW_ITEM_TYPE_IPV6,
1103 RTE_FLOW_ITEM_TYPE_TCP,
1104 RTE_FLOW_ITEM_TYPE_RAW,
1105 RTE_FLOW_ITEM_TYPE_RAW,
1106 RTE_FLOW_ITEM_TYPE_RAW,
1107 RTE_FLOW_ITEM_TYPE_VF,
1108 RTE_FLOW_ITEM_TYPE_END,
1111 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1112 RTE_FLOW_ITEM_TYPE_ETH,
1113 RTE_FLOW_ITEM_TYPE_IPV6,
1114 RTE_FLOW_ITEM_TYPE_SCTP,
1115 RTE_FLOW_ITEM_TYPE_RAW,
1116 RTE_FLOW_ITEM_TYPE_VF,
1117 RTE_FLOW_ITEM_TYPE_END,
1120 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1121 RTE_FLOW_ITEM_TYPE_ETH,
1122 RTE_FLOW_ITEM_TYPE_IPV6,
1123 RTE_FLOW_ITEM_TYPE_SCTP,
1124 RTE_FLOW_ITEM_TYPE_RAW,
1125 RTE_FLOW_ITEM_TYPE_RAW,
1126 RTE_FLOW_ITEM_TYPE_VF,
1127 RTE_FLOW_ITEM_TYPE_END,
1130 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1131 RTE_FLOW_ITEM_TYPE_ETH,
1132 RTE_FLOW_ITEM_TYPE_IPV6,
1133 RTE_FLOW_ITEM_TYPE_SCTP,
1134 RTE_FLOW_ITEM_TYPE_RAW,
1135 RTE_FLOW_ITEM_TYPE_RAW,
1136 RTE_FLOW_ITEM_TYPE_RAW,
1137 RTE_FLOW_ITEM_TYPE_VF,
1138 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns with a single VLAN tag (no flexible payload), terminated
 * by a VF item: ethertype, then IPv4/IPv6 with each supported L4 protocol.
 */
1141 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1142 RTE_FLOW_ITEM_TYPE_ETH,
1143 RTE_FLOW_ITEM_TYPE_VLAN,
1144 RTE_FLOW_ITEM_TYPE_VF,
1145 RTE_FLOW_ITEM_TYPE_END,
1148 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1149 RTE_FLOW_ITEM_TYPE_ETH,
1150 RTE_FLOW_ITEM_TYPE_VLAN,
1151 RTE_FLOW_ITEM_TYPE_IPV4,
1152 RTE_FLOW_ITEM_TYPE_VF,
1153 RTE_FLOW_ITEM_TYPE_END,
1156 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1157 RTE_FLOW_ITEM_TYPE_ETH,
1158 RTE_FLOW_ITEM_TYPE_VLAN,
1159 RTE_FLOW_ITEM_TYPE_IPV4,
1160 RTE_FLOW_ITEM_TYPE_UDP,
1161 RTE_FLOW_ITEM_TYPE_VF,
1162 RTE_FLOW_ITEM_TYPE_END,
1165 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1166 RTE_FLOW_ITEM_TYPE_ETH,
1167 RTE_FLOW_ITEM_TYPE_VLAN,
1168 RTE_FLOW_ITEM_TYPE_IPV4,
1169 RTE_FLOW_ITEM_TYPE_TCP,
1170 RTE_FLOW_ITEM_TYPE_VF,
1171 RTE_FLOW_ITEM_TYPE_END,
1174 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1175 RTE_FLOW_ITEM_TYPE_ETH,
1176 RTE_FLOW_ITEM_TYPE_VLAN,
1177 RTE_FLOW_ITEM_TYPE_IPV4,
1178 RTE_FLOW_ITEM_TYPE_SCTP,
1179 RTE_FLOW_ITEM_TYPE_VF,
1180 RTE_FLOW_ITEM_TYPE_END,
1183 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1184 RTE_FLOW_ITEM_TYPE_ETH,
1185 RTE_FLOW_ITEM_TYPE_VLAN,
1186 RTE_FLOW_ITEM_TYPE_IPV6,
1187 RTE_FLOW_ITEM_TYPE_VF,
1188 RTE_FLOW_ITEM_TYPE_END,
1191 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1192 RTE_FLOW_ITEM_TYPE_ETH,
1193 RTE_FLOW_ITEM_TYPE_VLAN,
1194 RTE_FLOW_ITEM_TYPE_IPV6,
1195 RTE_FLOW_ITEM_TYPE_UDP,
1196 RTE_FLOW_ITEM_TYPE_VF,
1197 RTE_FLOW_ITEM_TYPE_END,
1200 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1201 RTE_FLOW_ITEM_TYPE_ETH,
1202 RTE_FLOW_ITEM_TYPE_VLAN,
1203 RTE_FLOW_ITEM_TYPE_IPV6,
1204 RTE_FLOW_ITEM_TYPE_TCP,
1205 RTE_FLOW_ITEM_TYPE_VF,
1206 RTE_FLOW_ITEM_TYPE_END,
1209 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1210 RTE_FLOW_ITEM_TYPE_ETH,
1211 RTE_FLOW_ITEM_TYPE_VLAN,
1212 RTE_FLOW_ITEM_TYPE_IPV6,
1213 RTE_FLOW_ITEM_TYPE_SCTP,
1214 RTE_FLOW_ITEM_TYPE_VF,
1215 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns: single VLAN tag plus 1-3 flexible-payload (RAW) words,
 * terminated by a VF item. Ethertype and IPv4 variants.
 */
1218 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1219 RTE_FLOW_ITEM_TYPE_ETH,
1220 RTE_FLOW_ITEM_TYPE_VLAN,
1221 RTE_FLOW_ITEM_TYPE_RAW,
1222 RTE_FLOW_ITEM_TYPE_VF,
1223 RTE_FLOW_ITEM_TYPE_END,
1226 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1227 RTE_FLOW_ITEM_TYPE_ETH,
1228 RTE_FLOW_ITEM_TYPE_VLAN,
1229 RTE_FLOW_ITEM_TYPE_RAW,
1230 RTE_FLOW_ITEM_TYPE_RAW,
1231 RTE_FLOW_ITEM_TYPE_VF,
1232 RTE_FLOW_ITEM_TYPE_END,
1235 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1236 RTE_FLOW_ITEM_TYPE_ETH,
1237 RTE_FLOW_ITEM_TYPE_VLAN,
1238 RTE_FLOW_ITEM_TYPE_RAW,
1239 RTE_FLOW_ITEM_TYPE_RAW,
1240 RTE_FLOW_ITEM_TYPE_RAW,
1241 RTE_FLOW_ITEM_TYPE_VF,
1242 RTE_FLOW_ITEM_TYPE_END,
1245 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1246 RTE_FLOW_ITEM_TYPE_ETH,
1247 RTE_FLOW_ITEM_TYPE_VLAN,
1248 RTE_FLOW_ITEM_TYPE_IPV4,
1249 RTE_FLOW_ITEM_TYPE_RAW,
1250 RTE_FLOW_ITEM_TYPE_VF,
1251 RTE_FLOW_ITEM_TYPE_END,
1254 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1255 RTE_FLOW_ITEM_TYPE_ETH,
1256 RTE_FLOW_ITEM_TYPE_VLAN,
1257 RTE_FLOW_ITEM_TYPE_IPV4,
1258 RTE_FLOW_ITEM_TYPE_RAW,
1259 RTE_FLOW_ITEM_TYPE_RAW,
1260 RTE_FLOW_ITEM_TYPE_VF,
1261 RTE_FLOW_ITEM_TYPE_END,
1264 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1265 RTE_FLOW_ITEM_TYPE_ETH,
1266 RTE_FLOW_ITEM_TYPE_VLAN,
1267 RTE_FLOW_ITEM_TYPE_IPV4,
1268 RTE_FLOW_ITEM_TYPE_RAW,
1269 RTE_FLOW_ITEM_TYPE_RAW,
1270 RTE_FLOW_ITEM_TYPE_RAW,
1271 RTE_FLOW_ITEM_TYPE_VF,
1272 RTE_FLOW_ITEM_TYPE_END,
1275 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1276 RTE_FLOW_ITEM_TYPE_ETH,
1277 RTE_FLOW_ITEM_TYPE_VLAN,
1278 RTE_FLOW_ITEM_TYPE_IPV4,
1279 RTE_FLOW_ITEM_TYPE_UDP,
1280 RTE_FLOW_ITEM_TYPE_RAW,
1281 RTE_FLOW_ITEM_TYPE_VF,
1282 RTE_FLOW_ITEM_TYPE_END,
1285 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1286 RTE_FLOW_ITEM_TYPE_ETH,
1287 RTE_FLOW_ITEM_TYPE_VLAN,
1288 RTE_FLOW_ITEM_TYPE_IPV4,
1289 RTE_FLOW_ITEM_TYPE_UDP,
1290 RTE_FLOW_ITEM_TYPE_RAW,
1291 RTE_FLOW_ITEM_TYPE_RAW,
1292 RTE_FLOW_ITEM_TYPE_VF,
1293 RTE_FLOW_ITEM_TYPE_END,
1296 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1297 RTE_FLOW_ITEM_TYPE_ETH,
1298 RTE_FLOW_ITEM_TYPE_VLAN,
1299 RTE_FLOW_ITEM_TYPE_IPV4,
1300 RTE_FLOW_ITEM_TYPE_UDP,
1301 RTE_FLOW_ITEM_TYPE_RAW,
1302 RTE_FLOW_ITEM_TYPE_RAW,
1303 RTE_FLOW_ITEM_TYPE_RAW,
1304 RTE_FLOW_ITEM_TYPE_VF,
1305 RTE_FLOW_ITEM_TYPE_END,
1308 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1309 RTE_FLOW_ITEM_TYPE_ETH,
1310 RTE_FLOW_ITEM_TYPE_VLAN,
1311 RTE_FLOW_ITEM_TYPE_IPV4,
1312 RTE_FLOW_ITEM_TYPE_TCP,
1313 RTE_FLOW_ITEM_TYPE_RAW,
1314 RTE_FLOW_ITEM_TYPE_VF,
1315 RTE_FLOW_ITEM_TYPE_END,
1318 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1319 RTE_FLOW_ITEM_TYPE_ETH,
1320 RTE_FLOW_ITEM_TYPE_VLAN,
1321 RTE_FLOW_ITEM_TYPE_IPV4,
1322 RTE_FLOW_ITEM_TYPE_TCP,
1323 RTE_FLOW_ITEM_TYPE_RAW,
1324 RTE_FLOW_ITEM_TYPE_RAW,
1325 RTE_FLOW_ITEM_TYPE_VF,
1326 RTE_FLOW_ITEM_TYPE_END,
1329 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1330 RTE_FLOW_ITEM_TYPE_ETH,
1331 RTE_FLOW_ITEM_TYPE_VLAN,
1332 RTE_FLOW_ITEM_TYPE_IPV4,
1333 RTE_FLOW_ITEM_TYPE_TCP,
1334 RTE_FLOW_ITEM_TYPE_RAW,
1335 RTE_FLOW_ITEM_TYPE_RAW,
1336 RTE_FLOW_ITEM_TYPE_RAW,
1337 RTE_FLOW_ITEM_TYPE_VF,
1338 RTE_FLOW_ITEM_TYPE_END,
1341 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1342 RTE_FLOW_ITEM_TYPE_ETH,
1343 RTE_FLOW_ITEM_TYPE_VLAN,
1344 RTE_FLOW_ITEM_TYPE_IPV4,
1345 RTE_FLOW_ITEM_TYPE_SCTP,
1346 RTE_FLOW_ITEM_TYPE_RAW,
1347 RTE_FLOW_ITEM_TYPE_VF,
1348 RTE_FLOW_ITEM_TYPE_END,
1351 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1352 RTE_FLOW_ITEM_TYPE_ETH,
1353 RTE_FLOW_ITEM_TYPE_VLAN,
1354 RTE_FLOW_ITEM_TYPE_IPV4,
1355 RTE_FLOW_ITEM_TYPE_SCTP,
1356 RTE_FLOW_ITEM_TYPE_RAW,
1357 RTE_FLOW_ITEM_TYPE_RAW,
1358 RTE_FLOW_ITEM_TYPE_VF,
1359 RTE_FLOW_ITEM_TYPE_END,
1362 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1363 RTE_FLOW_ITEM_TYPE_ETH,
1364 RTE_FLOW_ITEM_TYPE_VLAN,
1365 RTE_FLOW_ITEM_TYPE_IPV4,
1366 RTE_FLOW_ITEM_TYPE_SCTP,
1367 RTE_FLOW_ITEM_TYPE_RAW,
1368 RTE_FLOW_ITEM_TYPE_RAW,
1369 RTE_FLOW_ITEM_TYPE_RAW,
1370 RTE_FLOW_ITEM_TYPE_VF,
1371 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns: single VLAN tag, IPv6, 1-3 flexible-payload (RAW) words,
 * terminated by a VF item. Mirrors the IPv4 variants above.
 */
1374 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1375 RTE_FLOW_ITEM_TYPE_ETH,
1376 RTE_FLOW_ITEM_TYPE_VLAN,
1377 RTE_FLOW_ITEM_TYPE_IPV6,
1378 RTE_FLOW_ITEM_TYPE_RAW,
1379 RTE_FLOW_ITEM_TYPE_VF,
1380 RTE_FLOW_ITEM_TYPE_END,
1383 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1384 RTE_FLOW_ITEM_TYPE_ETH,
1385 RTE_FLOW_ITEM_TYPE_VLAN,
1386 RTE_FLOW_ITEM_TYPE_IPV6,
1387 RTE_FLOW_ITEM_TYPE_RAW,
1388 RTE_FLOW_ITEM_TYPE_RAW,
1389 RTE_FLOW_ITEM_TYPE_VF,
1390 RTE_FLOW_ITEM_TYPE_END,
1393 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1394 RTE_FLOW_ITEM_TYPE_ETH,
1395 RTE_FLOW_ITEM_TYPE_VLAN,
1396 RTE_FLOW_ITEM_TYPE_IPV6,
1397 RTE_FLOW_ITEM_TYPE_RAW,
1398 RTE_FLOW_ITEM_TYPE_RAW,
1399 RTE_FLOW_ITEM_TYPE_RAW,
1400 RTE_FLOW_ITEM_TYPE_VF,
1401 RTE_FLOW_ITEM_TYPE_END,
1404 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1405 RTE_FLOW_ITEM_TYPE_ETH,
1406 RTE_FLOW_ITEM_TYPE_VLAN,
1407 RTE_FLOW_ITEM_TYPE_IPV6,
1408 RTE_FLOW_ITEM_TYPE_UDP,
1409 RTE_FLOW_ITEM_TYPE_RAW,
1410 RTE_FLOW_ITEM_TYPE_VF,
1411 RTE_FLOW_ITEM_TYPE_END,
1414 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1415 RTE_FLOW_ITEM_TYPE_ETH,
1416 RTE_FLOW_ITEM_TYPE_VLAN,
1417 RTE_FLOW_ITEM_TYPE_IPV6,
1418 RTE_FLOW_ITEM_TYPE_UDP,
1419 RTE_FLOW_ITEM_TYPE_RAW,
1420 RTE_FLOW_ITEM_TYPE_RAW,
1421 RTE_FLOW_ITEM_TYPE_VF,
1422 RTE_FLOW_ITEM_TYPE_END,
1425 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1426 RTE_FLOW_ITEM_TYPE_ETH,
1427 RTE_FLOW_ITEM_TYPE_VLAN,
1428 RTE_FLOW_ITEM_TYPE_IPV6,
1429 RTE_FLOW_ITEM_TYPE_UDP,
1430 RTE_FLOW_ITEM_TYPE_RAW,
1431 RTE_FLOW_ITEM_TYPE_RAW,
1432 RTE_FLOW_ITEM_TYPE_RAW,
1433 RTE_FLOW_ITEM_TYPE_VF,
1434 RTE_FLOW_ITEM_TYPE_END,
1437 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1438 RTE_FLOW_ITEM_TYPE_ETH,
1439 RTE_FLOW_ITEM_TYPE_VLAN,
1440 RTE_FLOW_ITEM_TYPE_IPV6,
1441 RTE_FLOW_ITEM_TYPE_TCP,
1442 RTE_FLOW_ITEM_TYPE_RAW,
1443 RTE_FLOW_ITEM_TYPE_VF,
1444 RTE_FLOW_ITEM_TYPE_END,
1447 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1448 RTE_FLOW_ITEM_TYPE_ETH,
1449 RTE_FLOW_ITEM_TYPE_VLAN,
1450 RTE_FLOW_ITEM_TYPE_IPV6,
1451 RTE_FLOW_ITEM_TYPE_TCP,
1452 RTE_FLOW_ITEM_TYPE_RAW,
1453 RTE_FLOW_ITEM_TYPE_RAW,
1454 RTE_FLOW_ITEM_TYPE_VF,
1455 RTE_FLOW_ITEM_TYPE_END,
1458 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1459 RTE_FLOW_ITEM_TYPE_ETH,
1460 RTE_FLOW_ITEM_TYPE_VLAN,
1461 RTE_FLOW_ITEM_TYPE_IPV6,
1462 RTE_FLOW_ITEM_TYPE_TCP,
1463 RTE_FLOW_ITEM_TYPE_RAW,
1464 RTE_FLOW_ITEM_TYPE_RAW,
1465 RTE_FLOW_ITEM_TYPE_RAW,
1466 RTE_FLOW_ITEM_TYPE_VF,
1467 RTE_FLOW_ITEM_TYPE_END,
1470 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1471 RTE_FLOW_ITEM_TYPE_ETH,
1472 RTE_FLOW_ITEM_TYPE_VLAN,
1473 RTE_FLOW_ITEM_TYPE_IPV6,
1474 RTE_FLOW_ITEM_TYPE_SCTP,
1475 RTE_FLOW_ITEM_TYPE_RAW,
1476 RTE_FLOW_ITEM_TYPE_VF,
1477 RTE_FLOW_ITEM_TYPE_END,
1480 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1481 RTE_FLOW_ITEM_TYPE_ETH,
1482 RTE_FLOW_ITEM_TYPE_VLAN,
1483 RTE_FLOW_ITEM_TYPE_IPV6,
1484 RTE_FLOW_ITEM_TYPE_SCTP,
1485 RTE_FLOW_ITEM_TYPE_RAW,
1486 RTE_FLOW_ITEM_TYPE_RAW,
1487 RTE_FLOW_ITEM_TYPE_VF,
1488 RTE_FLOW_ITEM_TYPE_END,
1491 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1492 RTE_FLOW_ITEM_TYPE_ETH,
1493 RTE_FLOW_ITEM_TYPE_VLAN,
1494 RTE_FLOW_ITEM_TYPE_IPV6,
1495 RTE_FLOW_ITEM_TYPE_SCTP,
1496 RTE_FLOW_ITEM_TYPE_RAW,
1497 RTE_FLOW_ITEM_TYPE_RAW,
1498 RTE_FLOW_ITEM_TYPE_RAW,
1499 RTE_FLOW_ITEM_TYPE_VF,
1500 RTE_FLOW_ITEM_TYPE_END,
1503 /* Pattern matched tunnel filter */
/* VXLAN: outer ETH/IPv4-or-IPv6/UDP, inner ETH (optionally VLAN-tagged). */
1504 static enum rte_flow_item_type pattern_vxlan_1[] = {
1505 RTE_FLOW_ITEM_TYPE_ETH,
1506 RTE_FLOW_ITEM_TYPE_IPV4,
1507 RTE_FLOW_ITEM_TYPE_UDP,
1508 RTE_FLOW_ITEM_TYPE_VXLAN,
1509 RTE_FLOW_ITEM_TYPE_ETH,
1510 RTE_FLOW_ITEM_TYPE_END,
1513 static enum rte_flow_item_type pattern_vxlan_2[] = {
1514 RTE_FLOW_ITEM_TYPE_ETH,
1515 RTE_FLOW_ITEM_TYPE_IPV6,
1516 RTE_FLOW_ITEM_TYPE_UDP,
1517 RTE_FLOW_ITEM_TYPE_VXLAN,
1518 RTE_FLOW_ITEM_TYPE_ETH,
1519 RTE_FLOW_ITEM_TYPE_END,
1522 static enum rte_flow_item_type pattern_vxlan_3[] = {
1523 RTE_FLOW_ITEM_TYPE_ETH,
1524 RTE_FLOW_ITEM_TYPE_IPV4,
1525 RTE_FLOW_ITEM_TYPE_UDP,
1526 RTE_FLOW_ITEM_TYPE_VXLAN,
1527 RTE_FLOW_ITEM_TYPE_ETH,
1528 RTE_FLOW_ITEM_TYPE_VLAN,
1529 RTE_FLOW_ITEM_TYPE_END,
1532 static enum rte_flow_item_type pattern_vxlan_4[] = {
1533 RTE_FLOW_ITEM_TYPE_ETH,
1534 RTE_FLOW_ITEM_TYPE_IPV6,
1535 RTE_FLOW_ITEM_TYPE_UDP,
1536 RTE_FLOW_ITEM_TYPE_VXLAN,
1537 RTE_FLOW_ITEM_TYPE_ETH,
1538 RTE_FLOW_ITEM_TYPE_VLAN,
1539 RTE_FLOW_ITEM_TYPE_END,
/* NVGRE: outer ETH/IPv4-or-IPv6, inner ETH (optionally VLAN-tagged). */
1542 static enum rte_flow_item_type pattern_nvgre_1[] = {
1543 RTE_FLOW_ITEM_TYPE_ETH,
1544 RTE_FLOW_ITEM_TYPE_IPV4,
1545 RTE_FLOW_ITEM_TYPE_NVGRE,
1546 RTE_FLOW_ITEM_TYPE_ETH,
1547 RTE_FLOW_ITEM_TYPE_END,
1550 static enum rte_flow_item_type pattern_nvgre_2[] = {
1551 RTE_FLOW_ITEM_TYPE_ETH,
1552 RTE_FLOW_ITEM_TYPE_IPV6,
1553 RTE_FLOW_ITEM_TYPE_NVGRE,
1554 RTE_FLOW_ITEM_TYPE_ETH,
1555 RTE_FLOW_ITEM_TYPE_END,
1558 static enum rte_flow_item_type pattern_nvgre_3[] = {
1559 RTE_FLOW_ITEM_TYPE_ETH,
1560 RTE_FLOW_ITEM_TYPE_IPV4,
1561 RTE_FLOW_ITEM_TYPE_NVGRE,
1562 RTE_FLOW_ITEM_TYPE_ETH,
1563 RTE_FLOW_ITEM_TYPE_VLAN,
1564 RTE_FLOW_ITEM_TYPE_END,
1567 static enum rte_flow_item_type pattern_nvgre_4[] = {
1568 RTE_FLOW_ITEM_TYPE_ETH,
1569 RTE_FLOW_ITEM_TYPE_IPV6,
1570 RTE_FLOW_ITEM_TYPE_NVGRE,
1571 RTE_FLOW_ITEM_TYPE_ETH,
1572 RTE_FLOW_ITEM_TYPE_VLAN,
1573 RTE_FLOW_ITEM_TYPE_END,
/* MPLS over UDP (patterns 1-2) and MPLS over GRE (patterns 3-4). */
1576 static enum rte_flow_item_type pattern_mpls_1[] = {
1577 RTE_FLOW_ITEM_TYPE_ETH,
1578 RTE_FLOW_ITEM_TYPE_IPV4,
1579 RTE_FLOW_ITEM_TYPE_UDP,
1580 RTE_FLOW_ITEM_TYPE_MPLS,
1581 RTE_FLOW_ITEM_TYPE_END,
1584 static enum rte_flow_item_type pattern_mpls_2[] = {
1585 RTE_FLOW_ITEM_TYPE_ETH,
1586 RTE_FLOW_ITEM_TYPE_IPV6,
1587 RTE_FLOW_ITEM_TYPE_UDP,
1588 RTE_FLOW_ITEM_TYPE_MPLS,
1589 RTE_FLOW_ITEM_TYPE_END,
1592 static enum rte_flow_item_type pattern_mpls_3[] = {
1593 RTE_FLOW_ITEM_TYPE_ETH,
1594 RTE_FLOW_ITEM_TYPE_IPV4,
1595 RTE_FLOW_ITEM_TYPE_GRE,
1596 RTE_FLOW_ITEM_TYPE_MPLS,
1597 RTE_FLOW_ITEM_TYPE_END,
1600 static enum rte_flow_item_type pattern_mpls_4[] = {
1601 RTE_FLOW_ITEM_TYPE_ETH,
1602 RTE_FLOW_ITEM_TYPE_IPV6,
1603 RTE_FLOW_ITEM_TYPE_GRE,
1604 RTE_FLOW_ITEM_TYPE_MPLS,
1605 RTE_FLOW_ITEM_TYPE_END,
/* QinQ: double VLAN tag. */
1608 static enum rte_flow_item_type pattern_qinq_1[] = {
1609 RTE_FLOW_ITEM_TYPE_ETH,
1610 RTE_FLOW_ITEM_TYPE_VLAN,
1611 RTE_FLOW_ITEM_TYPE_VLAN,
1612 RTE_FLOW_ITEM_TYPE_END,
/* Table associating each supported item-type sequence with the parse
 * callback that converts a matching rte_flow rule into the corresponding
 * i40e filter. Consulted by i40e_find_parse_filter_func() below.
 */
1615 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1617 { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1618 /* FDIR - support default flow type without flexible payload*/
1619 { pattern_ethertype, i40e_flow_parse_fdir_filter },
1620 { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1621 { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1622 { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1623 { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1624 { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1625 { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1626 { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1627 { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1628 { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1629 { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1630 { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1631 { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1632 { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1633 { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1634 { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1635 { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1636 /* FDIR - support default flow type with flexible payload */
1637 { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1638 { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1639 { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1640 { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1641 { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1642 { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1643 { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1644 { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1645 { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1646 { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1647 { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1648 { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1649 { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1650 { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1651 { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1652 { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1653 { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1654 { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1655 { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1656 { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1657 { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1658 { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1659 { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1660 { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1661 { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1662 { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1663 { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1664 /* FDIR - support single vlan input set */
1665 { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1666 { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1667 { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1668 { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1669 { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1670 { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1671 { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1672 { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1673 { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1674 { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1675 { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1676 { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1677 { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1678 { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1679 { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1680 { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1681 { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1682 { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1683 { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1684 { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1685 { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1686 { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1687 { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1688 { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1689 { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1690 { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1691 { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1692 { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1693 { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1694 { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1695 { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1696 { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1697 { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1698 { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1699 { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1700 { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1701 /* FDIR - support VF item */
1702 { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1703 { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1704 { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1705 { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1706 { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1707 { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1708 { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1709 { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1710 { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1711 { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1712 { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1713 { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1714 { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1715 { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1716 { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1717 { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1718 { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1719 { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1720 { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1721 { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1722 { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1723 { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1724 { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1725 { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1726 { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1727 { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1728 { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1729 { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1730 { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1731 { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1732 { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1733 { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1734 { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1735 { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1736 { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1737 { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1738 { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1739 { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1740 { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1741 { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1742 { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1743 { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1744 { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1745 { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1746 { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1747 { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1748 { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1749 { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1750 { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1751 { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1752 { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1753 { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1754 { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1755 { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1756 { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1757 { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1758 { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1759 { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1760 { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1761 { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1762 { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1763 { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1764 { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1765 { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1766 { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1767 { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1768 { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1769 { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1770 { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1771 { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1772 { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
/* VXLAN tunnel filters */
1774 { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1775 { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1776 { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1777 { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
/* NVGRE tunnel filters */
1779 { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1780 { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1781 { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1782 { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1783 /* MPLSoUDP & MPLSoGRE */
1784 { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1785 { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1786 { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1787 { pattern_mpls_4, i40e_flow_parse_mpls_filter },
/* GTP-C / GTP-U tunnel filters */
1789 { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1790 { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1791 { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1792 { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
/* QinQ filter */
1794 { pattern_qinq_1, i40e_flow_parse_qinq_filter },
/* Position "act" at the first non-VOID action at or after actions[index],
 * skipping RTE_FLOW_ACTION_TYPE_VOID entries.
 */
1797 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
1799 act = actions + index; \
1800 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
1802 act = actions + index; \
1806 /* Find the first VOID or non-VOID item pointer */
/*
 * Scan "item" forward, stopping at RTE_FLOW_ITEM_TYPE_END, and return the
 * first item whose VOID-ness matches "is_void" (true: first VOID item;
 * false: first non-VOID item).
 */
1807 static const struct rte_flow_item *
1808 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1812 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
1814 is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
1816 is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1824 /* Skip all VOID items of the pattern */
/*
 * Compact "pattern" into "items" by copying only the non-VOID items,
 * then append the END item. "items" must be large enough to hold the
 * compacted pattern including the END terminator.
 */
1826 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1827 const struct rte_flow_item *pattern)
1829 uint32_t cpy_count = 0;
1830 const struct rte_flow_item *pb = pattern, *pe = pattern;
1833 /* Find a non-void item first */
1834 pb = i40e_find_first_item(pb, false);
1835 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1840 /* Find a void item */
1841 pe = i40e_find_first_item(pb + 1, true);
/* Copy the run of non-VOID items [pb, pe) in one shot. */
1843 cpy_count = pe - pb;
1844 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1848 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1855 /* Copy the END item. */
1856 rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1859 /* Check if the pattern matches a supported item type array */
/*
 * Walk "item_array" and "pattern" in lockstep while the item types agree.
 * The pattern matches only when both sequences reach END together, i.e.
 * they have identical length and identical item types.
 */
1861 i40e_match_pattern(enum rte_flow_item_type *item_array,
1862 struct rte_flow_item *pattern)
1864 struct rte_flow_item *item = pattern;
1866 while ((*item_array == item->type) &&
1867 (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
1872 return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1873 item->type == RTE_FLOW_ITEM_TYPE_END);
1876 /* Find if there's parse filter function matched */
/*
 * Look up "pattern" in i40e_supported_patterns starting at *idx and return
 * the associated parse callback, or NULL when no entry matches. *idx lets
 * the caller resume the search at the next entry (several entries can share
 * one pattern, e.g. ethertype vs. FDIR).
 */
1877 static parse_filter_t
1878 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1880 parse_filter_t parse_filter = NULL;
1883 for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1884 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1886 parse_filter = i40e_supported_patterns[i].parse_filter;
1893 return parse_filter;
1896 /* Parse attributes */
/*
 * Validate the rte_flow attributes: only ingress rules with zero priority
 * and zero group are accepted. On violation, fill "error" (EINVAL) and
 * reject the rule.
 */
1898 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1899 struct rte_flow_error *error)
1901 /* Must be input direction */
1902 if (!attr->ingress) {
1903 rte_flow_error_set(error, EINVAL,
1904 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1905 attr, "Only support ingress.");
/* Egress rules are not supported. */
1911 rte_flow_error_set(error, EINVAL,
1912 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1913 attr, "Not support egress.");
/* Priority levels are not supported; only priority 0 is accepted. */
1918 if (attr->priority) {
1919 rte_flow_error_set(error, EINVAL,
1920 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1921 attr, "Not support priority.");
/* Groups are not supported; only group 0 is accepted. */
1927 rte_flow_error_set(error, EINVAL,
1928 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1929 attr, "Not support group.");
/*
 * Return the outer VLAN TPID currently programmed in the hardware.
 * Reads the L2 tag control register through the admin queue and extracts
 * the 16-bit ethertype field. The register index presumably depends on
 * whether extended (QinQ) VLAN mode is enabled -- the selection logic is
 * based on dev_conf.rxmode.hw_vlan_extend.
 */
1937 i40e_get_outer_vlan(struct rte_eth_dev *dev)
1939 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
1940 int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend;
1950 i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
/* Extract the 16-bit TPID/ethertype field from the register value. */
1953 tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
1958 /* 1. Last in item should be NULL as range is not supported.
1959 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
1960 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
1961 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
1963 * 5. Ether_type mask should be 0xFFFF.
/*
 * Translate an ethertype rte_flow pattern into an rte_eth_ethertype_filter.
 * Rejects patterns using ranges, NULL ETH spec/mask, unsupported MAC masks,
 * partial ethertype masks, and ethertypes handled elsewhere (IPv4, IPv6,
 * LLDP, or the outer VLAN TPID). Returns 0 on success; on failure fills
 * "error" with EINVAL and an item-level message.
 */
1966 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
1967 const struct rte_flow_item *pattern,
1968 struct rte_flow_error *error,
1969 struct rte_eth_ethertype_filter *filter)
1971 const struct rte_flow_item *item = pattern;
1972 const struct rte_flow_item_eth *eth_spec;
1973 const struct rte_flow_item_eth *eth_mask;
1974 enum rte_flow_item_type item_type;
1975 uint16_t outer_tpid;
1977 outer_tpid = i40e_get_outer_vlan(dev);
1979 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* "last" (range matching) is not supported. */
1981 rte_flow_error_set(error, EINVAL,
1982 RTE_FLOW_ERROR_TYPE_ITEM,
1984 "Not support range");
1987 item_type = item->type;
1988 switch (item_type) {
1989 case RTE_FLOW_ITEM_TYPE_ETH:
1990 eth_spec = (const struct rte_flow_item_eth *)item->spec;
1991 eth_mask = (const struct rte_flow_item_eth *)item->mask;
1992 /* Get the MAC info. */
1993 if (!eth_spec || !eth_mask) {
1994 rte_flow_error_set(error, EINVAL,
1995 RTE_FLOW_ERROR_TYPE_ITEM,
1997 "NULL ETH spec/mask")
2001 /* Mask bits of source MAC address must be full of 0.
2002 * Mask bits of destination MAC address must be full
2003 * of 1 or full of 0.
2005 if (!is_zero_ether_addr(&eth_mask->src) ||
2006 (!is_zero_ether_addr(&eth_mask->dst) &&
2007 !is_broadcast_ether_addr(&eth_mask->dst))) {
2008 rte_flow_error_set(error, EINVAL,
2009 RTE_FLOW_ERROR_TYPE_ITEM,
2011 "Invalid MAC_addr mask");
/* The ethertype must be matched exactly (full 16-bit mask). */
2015 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2016 rte_flow_error_set(error, EINVAL,
2017 RTE_FLOW_ERROR_TYPE_ITEM,
2019 "Invalid ethertype mask");
2023 /* If mask bits of destination MAC address
2024 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2026 if (is_broadcast_ether_addr(&eth_mask->dst)) {
2027 filter->mac_addr = eth_spec->dst;
2028 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2030 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
/* Ethertype arrives in network byte order; store host order. */
2032 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* These ethertypes are handled by other filter paths. */
2034 if (filter->ether_type == ETHER_TYPE_IPv4 ||
2035 filter->ether_type == ETHER_TYPE_IPv6 ||
2036 filter->ether_type == ETHER_TYPE_LLDP ||
2037 filter->ether_type == outer_tpid) {
2038 rte_flow_error_set(error, EINVAL,
2039 RTE_FLOW_ERROR_TYPE_ITEM,
2041 "Unsupported ether_type in"
2042 " control packet filter.");
2054 /* Ethertype action only supports QUEUE or DROP. */
/*
 * Translate the action list of an ethertype rule into "filter": the first
 * non-VOID action must be QUEUE (validated against the number of RX queues)
 * or DROP, and the next non-VOID action must be END. Fills "error" with
 * EINVAL on any violation.
 */
2056 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2057 const struct rte_flow_action *actions,
2058 struct rte_flow_error *error,
2059 struct rte_eth_ethertype_filter *filter)
2061 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2062 const struct rte_flow_action *act;
2063 const struct rte_flow_action_queue *act_q;
2066 /* Check if the first non-void action is QUEUE or DROP. */
2067 NEXT_ITEM_OF_ACTION(act, actions, index);
2068 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2069 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2070 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2071 act, "Not supported action.");
2075 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2076 act_q = (const struct rte_flow_action_queue *)act->conf;
2077 filter->queue = act_q->index;
/* The target queue must exist on this port. */
2078 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2079 rte_flow_error_set(error, EINVAL,
2080 RTE_FLOW_ERROR_TYPE_ACTION,
2081 act, "Invalid queue ID for"
2082 " ethertype_filter.");
2086 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2089 /* Check if the next non-void item is END */
2091 NEXT_ITEM_OF_ACTION(act, actions, index);
2092 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2093 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2094 act, "Not supported action.");
/*
 * Top-level parser for ethertype rules: parse the pattern, then the
 * actions, then the attributes, each bailing out on error. On success
 * records RTE_ETH_FILTER_ETHERTYPE as the pending filter type.
 */
2102 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2103 const struct rte_flow_attr *attr,
2104 const struct rte_flow_item pattern[],
2105 const struct rte_flow_action actions[],
2106 struct rte_flow_error *error,
2107 union i40e_filter_t *filter)
2109 struct rte_eth_ethertype_filter *ethertype_filter =
2110 &filter->ethertype_filter;
2113 ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2118 ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2123 ret = i40e_flow_parse_attr(attr, error);
/* Remember which kind of filter the subsequent create call must program. */
2127 cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
/*
 * Validate a RAW (flexible payload) item spec: it must be relative, with
 * an even non-negative offset, and must not use the search/limit fields.
 * Fills "error" with EINVAL and an item-level message on any violation.
 */
2133 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2134 const struct rte_flow_item_raw *raw_spec,
2135 struct rte_flow_error *error)
2137 if (!raw_spec->relative) {
2138 rte_flow_error_set(error, EINVAL,
2139 RTE_FLOW_ERROR_TYPE_ITEM,
2141 "Relative should be 1.");
/* Flexible payload words are 16 bits, so offsets must be word-aligned. */
2145 if (raw_spec->offset % sizeof(uint16_t)) {
2146 rte_flow_error_set(error, EINVAL,
2147 RTE_FLOW_ERROR_TYPE_ITEM,
2149 "Offset should be even.");
2153 if (raw_spec->search || raw_spec->limit) {
2154 rte_flow_error_set(error, EINVAL,
2155 RTE_FLOW_ERROR_TYPE_ITEM,
2157 "search or limit is not supported.");
2161 if (raw_spec->offset < 0) {
2162 rte_flow_error_set(error, EINVAL,
2163 RTE_FLOW_ERROR_TYPE_ITEM,
2165 "Offset should be non-negative.");
/*
 * Store one flexible-payload extraction descriptor (src/dst offset and
 * size) into the software shadow table pf->fdir.flex_set for the given
 * payload layer and raw item index. Detects both a conflicting and an
 * already-identical configuration against the first flexible rule.
 */
i40e_flow_store_flex_pit(struct i40e_pf *pf,
			 struct i40e_fdir_flex_pit *flex_pit,
			 enum i40e_flxpld_layer_idx layer_idx,
	/* Each layer owns I40E_MAX_FLXPLD_FIED consecutive slots. */
	field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
	/* Check if the configuration is conflicted */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
	     pf->fdir.flex_set[field_idx].size != flex_pit->size ||
	     pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
	/* Check if the configuration exists. */
	if (pf->fdir.flex_pit_flag[layer_idx] &&
	    (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
	     pf->fdir.flex_set[field_idx].size == flex_pit->size &&
	     pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
	/* New configuration for this slot: record it in SW. */
	pf->fdir.flex_set[field_idx].src_offset =
		flex_pit->src_offset;
	pf->fdir.flex_set[field_idx].size =
	pf->fdir.flex_set[field_idx].dst_offset =
		flex_pit->dst_offset;
/*
 * Build an i40e_fdir_flex_mask from the raw per-byte mask array and store
 * it in SW for the given packet classification type (pctype).
 *
 * The byte mask is folded into 16-bit words: fully-masked words set a bit
 * in word_mask; partially-masked words are recorded as (inverted mask,
 * word offset) pairs, bounded by I40E_FDIR_BITMASK_NUM_WORD. Conflicting
 * vs. identical pre-existing configurations for the pctype are detected
 * by memcmp before the store.
 */
i40e_flow_store_flex_mask(struct i40e_pf *pf,
			  enum i40e_filter_pctype pctype,
	struct i40e_fdir_flex_mask flex_mask;
	uint8_t i, nb_bitmask = 0;
	memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
	/* Walk the flex area two bytes at a time (one HW word per step). */
	for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
		mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
			flex_mask.word_mask |=
				I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
		if (mask_tmp != UINT16_MAX) {
			/* Partially masked word: HW wants the inverse mask
			 * plus the word offset within the flex payload.
			 */
			flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
			flex_mask.bitmask[nb_bitmask].offset =
				i / sizeof(uint16_t);
			/* HW supports a limited number of bitmask words. */
			if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
	flex_mask.nb_bitmask = nb_bitmask;
	/* Conflict with an already-programmed mask for this pctype. */
	if (pf->fdir.flex_mask_flag[pctype] &&
	    (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
		    sizeof(struct i40e_fdir_flex_mask))))
	/* Identical mask already stored: nothing to do. */
	else if (pf->fdir.flex_mask_flag[pctype] &&
		 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
			  sizeof(struct i40e_fdir_flex_mask))))
	memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
	       sizeof(struct i40e_fdir_flex_mask));
/*
 * Program the flexible-payload extraction registers (PRTQF_FLX_PIT) for
 * one payload layer from the SW shadow table filled in by
 * i40e_flow_store_flex_pit(). Slots beyond raw_id are programmed with
 * the documented "non-used" values to satisfy the register constraint
 * that source offsets be non-decreasing. Marks the layer as configured.
 */
i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
			    enum i40e_flxpld_layer_idx layer_idx,
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint32_t flx_pit, flx_ort;
	uint16_t min_next_off = 0; /* in words */
	/* Describe this layer's flex payload in the ORT register:
	 * field count and the index of the layer's first field slot.
	 */
	flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
		  (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
		  (layer_idx * I40E_MAX_FLXPLD_FIED);
	I40E_WRITE_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
	/* Program the slots actually used by this rule. */
	for (i = 0; i < raw_id; i++) {
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
				     pf->fdir.flex_set[field_idx].size,
				     pf->fdir.flex_set[field_idx].dst_offset);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
		/* Next slot must start at or after the end of this one. */
		min_next_off = pf->fdir.flex_set[field_idx].src_offset +
			       pf->fdir.flex_set[field_idx].size;
	for (; i < I40E_MAX_FLXPLD_FIED; i++) {
		/* set the non-used register obeying register's constrain */
		field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
		flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
				     NONUSE_FLX_PIT_DEST_OFF);
		I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
	/* Remember the layer has been programmed (first-rule semantics). */
	pf->fdir.flex_pit_flag[layer_idx] = 1;
/*
 * Program the flexible-payload mask registers for one pctype from the SW
 * shadow mask stored by i40e_flow_store_flex_mask(): the word-level mask
 * goes into PRTQF_FD_FLXINSET, each partial-word bitmask into a
 * PRTQF_FD_MSK register. Marks the pctype's mask as configured.
 */
i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
			    enum i40e_filter_pctype pctype)
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	struct i40e_fdir_flex_mask *flex_mask;
	uint32_t flxinset, fd_mask;
	flex_mask = &pf->fdir.flex_mask[pctype];
	/* Word-granular mask: which flex words participate in matching. */
	flxinset = (flex_mask->word_mask <<
		    I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
		    I40E_PRTQF_FD_FLXINSET_INSET_MASK;
	i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
	/* Bit-granular masks for partially masked words. */
	for (i = 0; i < flex_mask->nb_bitmask; i++) {
		fd_mask = (flex_mask->bitmask[i].mask <<
			   I40E_PRTQF_FD_MSK_MASK_SHIFT) &
			   I40E_PRTQF_FD_MSK_MASK_MASK;
		/* Offset is expressed within the HW field vector. */
		fd_mask |= ((flex_mask->bitmask[i].offset +
			     I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
			    I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
			    I40E_PRTQF_FD_MSK_OFFSET_MASK;
		i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
	pf->fdir.flex_mask_flag[pctype] = 1;
/*
 * Validate and program the FDIR input set for one pctype.
 *
 * Rejects an invalid input set, detects a conflict with (or an exact
 * repeat of) the input set already programmed for this pctype, then
 * writes the translated inset into PRTQF_FD_INSET (low/high 32 bits)
 * and the generated mask words into GLQF_FD_MSK, clearing unused mask
 * registers. The accepted input set is mirrored into SW state.
 */
i40e_flow_set_fdir_inset(struct i40e_pf *pf,
			 enum i40e_filter_pctype pctype,
	struct i40e_hw *hw = I40E_PF_TO_HW(pf);
	uint64_t inset_reg = 0;
	uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
	/* Check if the input set is valid */
	if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
		PMD_DRV_LOG(ERR, "Invalid input set");
	/* Check if the configuration is conflicted */
	if (pf->fdir.inset_flag[pctype] &&
	    memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
	/* Identical input set already programmed: nothing to write. */
	if (pf->fdir.inset_flag[pctype] &&
	    !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
	num = i40e_generate_inset_mask_reg(input_set, mask_reg,
					   I40E_INSET_MASK_NUM_REG);
	inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
	/* The 64-bit inset is split across two 32-bit registers. */
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
			     (uint32_t)(inset_reg & UINT32_MAX));
	i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
			     (uint32_t)((inset_reg >>
					 I40E_32_BIT_WIDTH) & UINT32_MAX));
	for (i = 0; i < num; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
	/* Clear unused mask registers of the pctype. */
	for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
		i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype), 0);
	I40E_WRITE_FLUSH(hw);
	/* Mirror the programmed input set in SW for future conflict checks. */
	pf->fdir.input_set[pctype] = input_set;
	pf->fdir.inset_flag[pctype] = 1;
/*
 * Map a customized pattern item type (GTP-C / GTP-U) to the pctype value
 * provisioned by the loaded DDP profile.
 *
 * For GTP-U the inner-IP information collected during pattern parsing
 * selects between the plain, IPv4-inner and IPv6-inner pctypes. Returns
 * I40E_FILTER_PCTYPE_INVALID when the item type is unsupported or no
 * matching customized pctype was found.
 */
i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
				enum rte_flow_item_type item_type,
				struct i40e_fdir_filter_conf *filter)
	struct i40e_customized_pctype *cus_pctype = NULL;
	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_GTPC:
		cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPC);
	case RTE_FLOW_ITEM_TYPE_GTPU:
		/* No inner IP item seen: generic GTP-U pctype. */
		if (!filter->input.flow_ext.inner_ip)
			cus_pctype = i40e_find_customized_pctype(pf,
							 I40E_CUSTOMIZED_GTPU);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV4)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV4);
		else if (filter->input.flow_ext.iip_type ==
			 I40E_FDIR_IPTYPE_IPV6)
			cus_pctype = i40e_find_customized_pctype(pf,
						 I40E_CUSTOMIZED_GTPU_IPV6);
		PMD_DRV_LOG(ERR, "Unsupported item type");
	return cus_pctype->pctype;
	return I40E_FILTER_PCTYPE_INVALID;
/* 1. The 'last' field of a pattern item should be NULL, as ranges are not
 *    supported.
 * 2. Supported patterns: refer to array i40e_supported_patterns.
 * 3. Default supported flow type and input set: refer to array
 *    valid_fdir_inset_table in i40e_ethdev.c.
 * 4. Mask of fields which need to be matched should be filled with 1.
 * 5. Mask of fields which need not be matched should be filled with 0.
 * 6. GTP profile supports GTPv1 only.
 * 7. GTP-C response message ('source_port' = 2123) is not supported.
2418 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2419 const struct rte_flow_item *pattern,
2420 struct rte_flow_error *error,
2421 struct i40e_fdir_filter_conf *filter)
2423 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2424 const struct rte_flow_item *item = pattern;
2425 const struct rte_flow_item_eth *eth_spec, *eth_mask;
2426 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2427 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2428 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2429 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2430 const struct rte_flow_item_udp *udp_spec, *udp_mask;
2431 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2432 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2433 const struct rte_flow_item_raw *raw_spec, *raw_mask;
2434 const struct rte_flow_item_vf *vf_spec;
2437 uint64_t input_set = I40E_INSET_NONE;
2439 enum rte_flow_item_type item_type;
2440 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2441 enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2443 uint8_t ipv6_addr_mask[16] = {
2444 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2445 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2446 enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2448 int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2449 uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2450 struct i40e_fdir_flex_pit flex_pit;
2451 uint8_t next_dst_off = 0;
2452 uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2454 bool cfg_flex_pit = true;
2455 bool cfg_flex_msk = true;
2456 uint16_t outer_tpid;
2457 uint16_t ether_type;
2458 uint32_t vtc_flow_cpu;
2459 bool outer_ip = true;
2462 memset(off_arr, 0, sizeof(off_arr));
2463 memset(len_arr, 0, sizeof(len_arr));
2464 memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2465 outer_tpid = i40e_get_outer_vlan(dev);
2466 filter->input.flow_ext.customized_pctype = false;
2467 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2469 rte_flow_error_set(error, EINVAL,
2470 RTE_FLOW_ERROR_TYPE_ITEM,
2472 "Not support range");
2475 item_type = item->type;
2476 switch (item_type) {
2477 case RTE_FLOW_ITEM_TYPE_ETH:
2478 eth_spec = (const struct rte_flow_item_eth *)item->spec;
2479 eth_mask = (const struct rte_flow_item_eth *)item->mask;
2481 if (eth_spec && eth_mask) {
2482 if (!is_zero_ether_addr(ð_mask->src) ||
2483 !is_zero_ether_addr(ð_mask->dst)) {
2484 rte_flow_error_set(error, EINVAL,
2485 RTE_FLOW_ERROR_TYPE_ITEM,
2487 "Invalid MAC_addr mask.");
2491 if ((eth_mask->type & UINT16_MAX) ==
2493 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2494 filter->input.flow.l2_flow.ether_type =
2498 ether_type = rte_be_to_cpu_16(eth_spec->type);
2499 if (ether_type == ETHER_TYPE_IPv4 ||
2500 ether_type == ETHER_TYPE_IPv6 ||
2501 ether_type == ETHER_TYPE_ARP ||
2502 ether_type == outer_tpid) {
2503 rte_flow_error_set(error, EINVAL,
2504 RTE_FLOW_ERROR_TYPE_ITEM,
2506 "Unsupported ether_type.");
2511 pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2512 layer_idx = I40E_FLXPLD_L2_IDX;
2515 case RTE_FLOW_ITEM_TYPE_VLAN:
2517 (const struct rte_flow_item_vlan *)item->spec;
2519 (const struct rte_flow_item_vlan *)item->mask;
2520 if (vlan_spec && vlan_mask) {
2521 if (vlan_mask->tci ==
2522 rte_cpu_to_be_16(I40E_TCI_MASK)) {
2523 input_set |= I40E_INSET_VLAN_INNER;
2524 filter->input.flow_ext.vlan_tci =
2529 pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2530 layer_idx = I40E_FLXPLD_L2_IDX;
2533 case RTE_FLOW_ITEM_TYPE_IPV4:
2534 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2536 (const struct rte_flow_item_ipv4 *)item->spec;
2538 (const struct rte_flow_item_ipv4 *)item->mask;
2539 pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2540 layer_idx = I40E_FLXPLD_L3_IDX;
2542 if (ipv4_spec && ipv4_mask && outer_ip) {
2543 /* Check IPv4 mask and update input set */
2544 if (ipv4_mask->hdr.version_ihl ||
2545 ipv4_mask->hdr.total_length ||
2546 ipv4_mask->hdr.packet_id ||
2547 ipv4_mask->hdr.fragment_offset ||
2548 ipv4_mask->hdr.hdr_checksum) {
2549 rte_flow_error_set(error, EINVAL,
2550 RTE_FLOW_ERROR_TYPE_ITEM,
2552 "Invalid IPv4 mask.");
2556 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2557 input_set |= I40E_INSET_IPV4_SRC;
2558 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2559 input_set |= I40E_INSET_IPV4_DST;
2560 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2561 input_set |= I40E_INSET_IPV4_TOS;
2562 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2563 input_set |= I40E_INSET_IPV4_TTL;
2564 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2565 input_set |= I40E_INSET_IPV4_PROTO;
2567 /* Check if it is fragment. */
2568 frag_off = ipv4_spec->hdr.fragment_offset;
2569 frag_off = rte_be_to_cpu_16(frag_off);
2570 if (frag_off & IPV4_HDR_OFFSET_MASK ||
2571 frag_off & IPV4_HDR_MF_FLAG)
2572 pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2574 /* Get the filter info */
2575 filter->input.flow.ip4_flow.proto =
2576 ipv4_spec->hdr.next_proto_id;
2577 filter->input.flow.ip4_flow.tos =
2578 ipv4_spec->hdr.type_of_service;
2579 filter->input.flow.ip4_flow.ttl =
2580 ipv4_spec->hdr.time_to_live;
2581 filter->input.flow.ip4_flow.src_ip =
2582 ipv4_spec->hdr.src_addr;
2583 filter->input.flow.ip4_flow.dst_ip =
2584 ipv4_spec->hdr.dst_addr;
2585 } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
2586 filter->input.flow_ext.inner_ip = true;
2587 filter->input.flow_ext.iip_type =
2588 I40E_FDIR_IPTYPE_IPV4;
2589 } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2590 rte_flow_error_set(error, EINVAL,
2591 RTE_FLOW_ERROR_TYPE_ITEM,
2593 "Invalid inner IPv4 mask.");
2601 case RTE_FLOW_ITEM_TYPE_IPV6:
2602 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2604 (const struct rte_flow_item_ipv6 *)item->spec;
2606 (const struct rte_flow_item_ipv6 *)item->mask;
2607 pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2608 layer_idx = I40E_FLXPLD_L3_IDX;
2610 if (ipv6_spec && ipv6_mask && outer_ip) {
2611 /* Check IPv6 mask and update input set */
2612 if (ipv6_mask->hdr.payload_len) {
2613 rte_flow_error_set(error, EINVAL,
2614 RTE_FLOW_ERROR_TYPE_ITEM,
2616 "Invalid IPv6 mask");
2620 if (!memcmp(ipv6_mask->hdr.src_addr,
2622 RTE_DIM(ipv6_mask->hdr.src_addr)))
2623 input_set |= I40E_INSET_IPV6_SRC;
2624 if (!memcmp(ipv6_mask->hdr.dst_addr,
2626 RTE_DIM(ipv6_mask->hdr.dst_addr)))
2627 input_set |= I40E_INSET_IPV6_DST;
2629 if ((ipv6_mask->hdr.vtc_flow &
2630 rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2631 == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2632 input_set |= I40E_INSET_IPV6_TC;
2633 if (ipv6_mask->hdr.proto == UINT8_MAX)
2634 input_set |= I40E_INSET_IPV6_NEXT_HDR;
2635 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2636 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2638 /* Get filter info */
2640 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2641 filter->input.flow.ipv6_flow.tc =
2642 (uint8_t)(vtc_flow_cpu >>
2643 I40E_FDIR_IPv6_TC_OFFSET);
2644 filter->input.flow.ipv6_flow.proto =
2645 ipv6_spec->hdr.proto;
2646 filter->input.flow.ipv6_flow.hop_limits =
2647 ipv6_spec->hdr.hop_limits;
2649 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2650 ipv6_spec->hdr.src_addr, 16);
2651 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2652 ipv6_spec->hdr.dst_addr, 16);
2654 /* Check if it is fragment. */
2655 if (ipv6_spec->hdr.proto ==
2656 I40E_IPV6_FRAG_HEADER)
2657 pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2658 } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2659 filter->input.flow_ext.inner_ip = true;
2660 filter->input.flow_ext.iip_type =
2661 I40E_FDIR_IPTYPE_IPV6;
2662 } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2663 rte_flow_error_set(error, EINVAL,
2664 RTE_FLOW_ERROR_TYPE_ITEM,
2666 "Invalid inner IPv6 mask");
2673 case RTE_FLOW_ITEM_TYPE_TCP:
2674 tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
2675 tcp_mask = (const struct rte_flow_item_tcp *)item->mask;
2677 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2679 I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2680 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2682 I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2683 if (tcp_spec && tcp_mask) {
2684 /* Check TCP mask and update input set */
2685 if (tcp_mask->hdr.sent_seq ||
2686 tcp_mask->hdr.recv_ack ||
2687 tcp_mask->hdr.data_off ||
2688 tcp_mask->hdr.tcp_flags ||
2689 tcp_mask->hdr.rx_win ||
2690 tcp_mask->hdr.cksum ||
2691 tcp_mask->hdr.tcp_urp) {
2692 rte_flow_error_set(error, EINVAL,
2693 RTE_FLOW_ERROR_TYPE_ITEM,
2695 "Invalid TCP mask");
2699 if (tcp_mask->hdr.src_port == UINT16_MAX)
2700 input_set |= I40E_INSET_SRC_PORT;
2701 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2702 input_set |= I40E_INSET_DST_PORT;
2704 /* Get filter info */
2705 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2706 filter->input.flow.tcp4_flow.src_port =
2707 tcp_spec->hdr.src_port;
2708 filter->input.flow.tcp4_flow.dst_port =
2709 tcp_spec->hdr.dst_port;
2710 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2711 filter->input.flow.tcp6_flow.src_port =
2712 tcp_spec->hdr.src_port;
2713 filter->input.flow.tcp6_flow.dst_port =
2714 tcp_spec->hdr.dst_port;
2718 layer_idx = I40E_FLXPLD_L4_IDX;
2721 case RTE_FLOW_ITEM_TYPE_UDP:
2722 udp_spec = (const struct rte_flow_item_udp *)item->spec;
2723 udp_mask = (const struct rte_flow_item_udp *)item->mask;
2725 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2727 I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2728 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2730 I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2732 if (udp_spec && udp_mask) {
2733 /* Check UDP mask and update input set*/
2734 if (udp_mask->hdr.dgram_len ||
2735 udp_mask->hdr.dgram_cksum) {
2736 rte_flow_error_set(error, EINVAL,
2737 RTE_FLOW_ERROR_TYPE_ITEM,
2739 "Invalid UDP mask");
2743 if (udp_mask->hdr.src_port == UINT16_MAX)
2744 input_set |= I40E_INSET_SRC_PORT;
2745 if (udp_mask->hdr.dst_port == UINT16_MAX)
2746 input_set |= I40E_INSET_DST_PORT;
2748 /* Get filter info */
2749 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2750 filter->input.flow.udp4_flow.src_port =
2751 udp_spec->hdr.src_port;
2752 filter->input.flow.udp4_flow.dst_port =
2753 udp_spec->hdr.dst_port;
2754 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2755 filter->input.flow.udp6_flow.src_port =
2756 udp_spec->hdr.src_port;
2757 filter->input.flow.udp6_flow.dst_port =
2758 udp_spec->hdr.dst_port;
2762 layer_idx = I40E_FLXPLD_L4_IDX;
2765 case RTE_FLOW_ITEM_TYPE_GTPC:
2766 case RTE_FLOW_ITEM_TYPE_GTPU:
2767 if (!pf->gtp_support) {
2768 rte_flow_error_set(error, EINVAL,
2769 RTE_FLOW_ERROR_TYPE_ITEM,
2771 "Unsupported protocol");
2775 gtp_spec = (const struct rte_flow_item_gtp *)item->spec;
2776 gtp_mask = (const struct rte_flow_item_gtp *)item->mask;
2778 if (gtp_spec && gtp_mask) {
2779 if (gtp_mask->v_pt_rsv_flags ||
2780 gtp_mask->msg_type ||
2781 gtp_mask->msg_len ||
2782 gtp_mask->teid != UINT32_MAX) {
2783 rte_flow_error_set(error, EINVAL,
2784 RTE_FLOW_ERROR_TYPE_ITEM,
2786 "Invalid GTP mask");
2790 filter->input.flow.gtp_flow.teid =
2792 filter->input.flow_ext.customized_pctype = true;
2793 cus_proto = item_type;
2796 case RTE_FLOW_ITEM_TYPE_SCTP:
2798 (const struct rte_flow_item_sctp *)item->spec;
2800 (const struct rte_flow_item_sctp *)item->mask;
2802 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2804 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
2805 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2807 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
2809 if (sctp_spec && sctp_mask) {
2810 /* Check SCTP mask and update input set */
2811 if (sctp_mask->hdr.cksum) {
2812 rte_flow_error_set(error, EINVAL,
2813 RTE_FLOW_ERROR_TYPE_ITEM,
2815 "Invalid UDP mask");
2819 if (sctp_mask->hdr.src_port == UINT16_MAX)
2820 input_set |= I40E_INSET_SRC_PORT;
2821 if (sctp_mask->hdr.dst_port == UINT16_MAX)
2822 input_set |= I40E_INSET_DST_PORT;
2823 if (sctp_mask->hdr.tag == UINT32_MAX)
2824 input_set |= I40E_INSET_SCTP_VT;
2826 /* Get filter info */
2827 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2828 filter->input.flow.sctp4_flow.src_port =
2829 sctp_spec->hdr.src_port;
2830 filter->input.flow.sctp4_flow.dst_port =
2831 sctp_spec->hdr.dst_port;
2832 filter->input.flow.sctp4_flow.verify_tag
2833 = sctp_spec->hdr.tag;
2834 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2835 filter->input.flow.sctp6_flow.src_port =
2836 sctp_spec->hdr.src_port;
2837 filter->input.flow.sctp6_flow.dst_port =
2838 sctp_spec->hdr.dst_port;
2839 filter->input.flow.sctp6_flow.verify_tag
2840 = sctp_spec->hdr.tag;
2844 layer_idx = I40E_FLXPLD_L4_IDX;
2847 case RTE_FLOW_ITEM_TYPE_RAW:
2848 raw_spec = (const struct rte_flow_item_raw *)item->spec;
2849 raw_mask = (const struct rte_flow_item_raw *)item->mask;
2851 if (!raw_spec || !raw_mask) {
2852 rte_flow_error_set(error, EINVAL,
2853 RTE_FLOW_ERROR_TYPE_ITEM,
2855 "NULL RAW spec/mask");
2859 ret = i40e_flow_check_raw_item(item, raw_spec, error);
2863 off_arr[raw_id] = raw_spec->offset;
2864 len_arr[raw_id] = raw_spec->length;
2867 memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
2869 raw_spec->length / sizeof(uint16_t);
2870 flex_pit.dst_offset =
2871 next_dst_off / sizeof(uint16_t);
2873 for (i = 0; i <= raw_id; i++) {
2875 flex_pit.src_offset +=
2879 flex_pit.src_offset +=
2880 (off_arr[i] + len_arr[i]) /
2882 flex_size += len_arr[i];
2884 if (((flex_pit.src_offset + flex_pit.size) >=
2885 I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
2886 flex_size > I40E_FDIR_MAX_FLEXLEN) {
2887 rte_flow_error_set(error, EINVAL,
2888 RTE_FLOW_ERROR_TYPE_ITEM,
2890 "Exceeds maxmial payload limit.");
2894 /* Store flex pit to SW */
2895 ret = i40e_flow_store_flex_pit(pf, &flex_pit,
2898 rte_flow_error_set(error, EINVAL,
2899 RTE_FLOW_ERROR_TYPE_ITEM,
2901 "Conflict with the first flexible rule.");
2904 cfg_flex_pit = false;
2906 for (i = 0; i < raw_spec->length; i++) {
2907 j = i + next_dst_off;
2908 filter->input.flow_ext.flexbytes[j] =
2909 raw_spec->pattern[i];
2910 flex_mask[j] = raw_mask->pattern[i];
2913 next_dst_off += raw_spec->length;
2916 case RTE_FLOW_ITEM_TYPE_VF:
2917 vf_spec = (const struct rte_flow_item_vf *)item->spec;
2918 filter->input.flow_ext.is_vf = 1;
2919 filter->input.flow_ext.dst_id = vf_spec->id;
2920 if (filter->input.flow_ext.is_vf &&
2921 filter->input.flow_ext.dst_id >= pf->vf_num) {
2922 rte_flow_error_set(error, EINVAL,
2923 RTE_FLOW_ERROR_TYPE_ITEM,
2925 "Invalid VF ID for FDIR.");
2934 /* Get customized pctype value */
2935 if (filter->input.flow_ext.customized_pctype) {
2936 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
2937 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
2938 rte_flow_error_set(error, EINVAL,
2939 RTE_FLOW_ERROR_TYPE_ITEM,
2941 "Unsupported pctype");
2946 /* If customized pctype is not used, set fdir configuration.*/
2947 if (!filter->input.flow_ext.customized_pctype) {
2948 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
2950 rte_flow_error_set(error, EINVAL,
2951 RTE_FLOW_ERROR_TYPE_ITEM, item,
2952 "Conflict with the first rule's input set.");
2954 } else if (ret == -EINVAL) {
2955 rte_flow_error_set(error, EINVAL,
2956 RTE_FLOW_ERROR_TYPE_ITEM, item,
2957 "Invalid pattern mask.");
2961 /* Store flex mask to SW */
2962 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
2964 rte_flow_error_set(error, EINVAL,
2965 RTE_FLOW_ERROR_TYPE_ITEM,
2967 "Exceed maximal number of bitmasks");
2969 } else if (ret == -2) {
2970 rte_flow_error_set(error, EINVAL,
2971 RTE_FLOW_ERROR_TYPE_ITEM,
2973 "Conflict with the first flexible rule");
2976 cfg_flex_msk = false;
2979 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
2982 i40e_flow_set_fdir_flex_msk(pf, pctype);
2985 filter->input.pctype = pctype;
2990 /* Parse to get the action info of a FDIR filter.
2991 * FDIR action supports QUEUE or (QUEUE + MARK).
/*
 * Parse the action list of an FDIR rule into filter->action.
 *
 * The first non-void action must be QUEUE (with range-checked queue id,
 * against PF or VF queue counts depending on the pattern's VF item),
 * DROP or PASSTHRU; it may be followed by an optional MARK (stores the
 * id in soft_id and enables ID reporting) or FLAG, and the list must
 * then terminate with END. Sets rte_flow_error with EINVAL otherwise.
 */
i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
			    const struct rte_flow_action *actions,
			    struct rte_flow_error *error,
			    struct i40e_fdir_filter_conf *filter)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_mark *mark_spec;
	/* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->action.rx_queue = act_q->index;
		/* Validate the queue index against PF or VF queue range,
		 * depending on whether the pattern carried a VF item.
		 */
		if ((!filter->input.flow_ext.is_vf &&
		     filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
		    (filter->input.flow_ext.is_vf &&
		     filter->action.rx_queue >= pf->vf_nb_qps)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID for FDIR.");
		filter->action.behavior = I40E_FDIR_ACCEPT;
	case RTE_FLOW_ACTION_TYPE_DROP:
		filter->action.behavior = I40E_FDIR_REJECT;
	case RTE_FLOW_ACTION_TYPE_PASSTHRU:
		filter->action.behavior = I40E_FDIR_PASSTHRU;
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
	/* Check if the next non-void item is MARK or FLAG or END. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_MARK:
		mark_spec = (const struct rte_flow_action_mark *)act->conf;
		/* Report the mark id in the RX descriptor (FDIR ID). */
		filter->action.report_status = I40E_FDIR_REPORT_ID;
		filter->soft_id = mark_spec->id;
	case RTE_FLOW_ACTION_TYPE_FLAG:
		filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
	case RTE_FLOW_ACTION_TYPE_END:
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
	/* Check if the next non-void item is END */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
/*
 * Parse a complete flow director rule: pattern, actions, attributes.
 * On success the result is stored in the caller's filter union (fdir
 * member) and cons_filter_type records the parsed type. Additionally
 * requires the port to be configured in FDIR perfect-match mode.
 */
i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
			    const struct rte_flow_attr *attr,
			    const struct rte_flow_item pattern[],
			    const struct rte_flow_action actions[],
			    struct rte_flow_error *error,
			    union i40e_filter_t *filter)
	struct i40e_fdir_filter_conf *fdir_filter =
		&filter->fdir_filter;
	ret = i40e_flow_parse_fdir_pattern(dev, pattern, error, fdir_filter);
	ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
	ret = i40e_flow_parse_attr(attr, error);
	cons_filter_type = RTE_ETH_FILTER_FDIR;
	/* Flow rules require the device-level FDIR mode to be 'perfect'. */
	if (dev->data->dev_conf.fdir_conf.mode !=
	    RTE_FDIR_MODE_PERFECT) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   "Check the mode in fdir_conf.");
3106 /* Parse to get the action info of a tunnel filter
3107 * Tunnel action only supports PF, VF and QUEUE.
/*
 * Parse the action list of a tunnel filter rule.
 *
 * The first non-void action must be PF or VF (VF id range-checked and
 * recorded in the filter), optionally followed by QUEUE (queue id
 * range-checked against PF or VF queue counts), then END.
 * Sets rte_flow_error with EINVAL on any violation.
 */
i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
			      const struct rte_flow_action *actions,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
	struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
	const struct rte_flow_action *act;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	/* Check if the first non-void action is PF or VF. */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
	    act->type != RTE_FLOW_ACTION_TYPE_VF) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
	if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		filter->vf_id = act_vf->id;
		filter->is_to_vf = 1;
		if (filter->vf_id >= pf->vf_num) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid VF ID for tunnel filter");
	/* Check if the next non-void item is QUEUE */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
		act_q = (const struct rte_flow_action_queue *)act->conf;
		filter->queue_id = act_q->index;
		/* Queue id must fit the PF's or the target VF's queue range. */
		if ((!filter->is_to_vf) &&
		    (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for tunnel filter");
		} else if (filter->is_to_vf &&
			   (filter->queue_id >= pf->vf_nb_qps)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act, "Invalid queue ID for tunnel filter");
	/* Check if the next non-void item is END */
	NEXT_ITEM_OF_ACTION(act, actions, index);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Not supported action.");
/* Tunnel filter type combinations accepted by the HW: each entry is an
 * OR of ETH_TUNNEL_FILTER_* flags (inner MAC, inner VLAN, tenant id,
 * outer MAC) that i40e_check_tunnel_filter_type() validates against.
 */
static uint16_t i40e_supported_tunnel_filter_types[] = {
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
	ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
	ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
	ETH_TUNNEL_FILTER_IMAC,
	ETH_TUNNEL_FILTER_IMAC,
/*
 * Check whether a tunnel filter-type flag combination is one of the
 * supported combinations listed in i40e_supported_tunnel_filter_types.
 */
i40e_check_tunnel_filter_type(uint8_t filter_type)
	for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
		if (filter_type == i40e_supported_tunnel_filter_types[i])
/* 1. The 'last' field of a pattern item should be NULL, as ranges are not
 *    supported.
 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
 *    IMAC_TENID, OMAC_TENID_IMAC and IMAC.
 * 3. Mask of fields which need to be matched should be filled with 1.
 * 4. Mask of fields which need not be matched should be filled with 0.
/*
 * Parse the pattern of a VXLAN tunnel filter rule into an
 * i40e_tunnel_filter_conf.
 *
 * ETH items before the VXLAN item describe the outer header, items after
 * it the inner header; IPv4/IPv6/UDP items are protocol placeholders and
 * must carry no spec/mask. The accumulated combination of matched fields
 * (inner/outer MAC, inner VLAN, VNI/tenant id) is validated against
 * i40e_supported_tunnel_filter_types at the end.
 */
i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
			      const struct rte_flow_item *pattern,
			      struct rte_flow_error *error,
			      struct i40e_tunnel_filter_conf *filter)
	const struct rte_flow_item *item = pattern;
	const struct rte_flow_item_eth *eth_spec;
	const struct rte_flow_item_eth *eth_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	const struct rte_flow_item_vlan *vlan_spec;
	const struct rte_flow_item_vlan *vlan_mask;
	uint8_t filter_type = 0;
	bool is_vni_masked = 0;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	enum rte_flow_item_type item_type;
	bool vxlan_flag = 0;	/* set once the VXLAN item has been seen */
	uint32_t tenant_id_be = 0;
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
			/* Ranges ('last') are not supported. */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   "Not support range");
		item_type = item->type;
		switch (item_type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;
			/* Check if ETH item is used for place holder.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!eth_spec && eth_mask) ||
			    (eth_spec && !eth_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid ether spec/mask");
			if (eth_spec && eth_mask) {
				/* DST address of inner MAC shouldn't be masked.
				 * SRC address of Inner MAC should be masked.
				 */
				if (!is_broadcast_ether_addr(&eth_mask->dst) ||
				    !is_zero_ether_addr(&eth_mask->src) ||
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid ether spec/mask");
				/* An ETH item before VXLAN is the outer MAC,
				 * after VXLAN it is the inner MAC.
				 */
					rte_memcpy(&filter->outer_mac,
					filter_type |= ETH_TUNNEL_FILTER_OMAC;
					rte_memcpy(&filter->inner_mac,
					filter_type |= ETH_TUNNEL_FILTER_IMAC;
		case RTE_FLOW_ITEM_TYPE_VLAN:
				(const struct rte_flow_item_vlan *)item->spec;
				(const struct rte_flow_item_vlan *)item->mask;
			if (!(vlan_spec && vlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid vlan item");
			if (vlan_spec && vlan_mask) {
				/* Only a full-TCI match selects the VLAN id. */
				if (vlan_mask->tci ==
				    rte_cpu_to_be_16(I40E_TCI_MASK))
					filter->inner_vlan =
						rte_be_to_cpu_16(vlan_spec->tci) &
				filter_type |= ETH_TUNNEL_FILTER_IVLAN;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
			/* IPv4 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv4 item");
		case RTE_FLOW_ITEM_TYPE_IPV6:
			filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
			/* IPv6 is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid IPv6 item");
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* UDP is used to describe protocol,
			 * spec and mask should be NULL.
			 */
			if (item->spec || item->mask) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid UDP item");
		case RTE_FLOW_ITEM_TYPE_VXLAN:
				(const struct rte_flow_item_vxlan *)item->spec;
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VXLAN item");
			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
					!!memcmp(vxlan_mask->vni, vni_mask,
				if (is_vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   "Invalid VNI mask");
				/* 24-bit VNI goes into the upper bytes of a
				 * big-endian 32-bit tenant id.
				 */
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
					rte_be_to_cpu_32(tenant_id_be);
				filter_type |= ETH_TUNNEL_FILTER_TENID;
	/* The combination of matched fields must be one HW supports. */
	ret = i40e_check_tunnel_filter_type(filter_type);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM,
				   "Invalid filter type");
	filter->filter_type = filter_type;
	filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
/* Top-level parser for a VXLAN tunnel flow rule.
 * Fills filter->consistent_tunnel_filter by parsing, in order:
 * pattern items, actions, then rule attributes, and finally records
 * the consistent filter type as RTE_ETH_FILTER_TUNNEL.
 * NOTE(review): this excerpt elides some lines (early returns on
 * parse failure are presumably between the calls — confirm upstream). */
3402 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
3403 const struct rte_flow_attr *attr,
3404 const struct rte_flow_item pattern[],
3405 const struct rte_flow_action actions[],
3406 struct rte_flow_error *error,
3407 union i40e_filter_t *filter)
3409 struct i40e_tunnel_filter_conf *tunnel_filter =
3410 &filter->consistent_tunnel_filter;
/* Step 1: pattern items (ETH/VLAN/IP/UDP/VXLAN). */
3413 ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
3414 error, tunnel_filter);
/* Step 2: actions (queue / VF redirection). */
3418 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
/* Step 3: attributes (ingress/egress, priority, group). */
3422 ret = i40e_flow_parse_attr(attr, error);
/* Remember which consistent filter union member is now valid. */
3426 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3431 /* 1. Last in item should be NULL as range is not supported.
3432 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3433 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3434 * 3. Mask of fields which need to be matched should be
3436 * 4. Mask of fields which needn't be matched should be
/* Parse the pattern items of an NVGRE tunnel flow rule into
 * an i40e_tunnel_filter_conf.
 * Accepted items: ETH (outer placeholder or inner MAC match), VLAN
 * (inner VLAN), IPV4/IPV6 (protocol markers only, spec/mask NULL),
 * NVGRE (TNI = tenant id, protocol 0x6558, key-present header word).
 * filter_type accumulates ETH_TUNNEL_FILTER_* bits and is validated
 * by i40e_check_tunnel_filter_type() at the end.
 * NOTE(review): the excerpt elides error-return lines and `break`s;
 * each rte_flow_error_set() is presumably followed by a return. */
3440 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
3441 const struct rte_flow_item *pattern,
3442 struct rte_flow_error *error,
3443 struct i40e_tunnel_filter_conf *filter)
3445 const struct rte_flow_item *item = pattern;
3446 const struct rte_flow_item_eth *eth_spec;
3447 const struct rte_flow_item_eth *eth_mask;
3448 const struct rte_flow_item_nvgre *nvgre_spec;
3449 const struct rte_flow_item_nvgre *nvgre_mask;
3450 const struct rte_flow_item_vlan *vlan_spec;
3451 const struct rte_flow_item_vlan *vlan_mask;
3452 enum rte_flow_item_type item_type;
3453 uint8_t filter_type = 0;
3454 bool is_tni_masked = 0;
/* Full 24-bit TNI must be matched exactly. */
3455 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
3456 bool nvgre_flag = 0;
3457 uint32_t tenant_id_be = 0;
3460 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* item->last (ranges) is not supported by this driver. */
3462 rte_flow_error_set(error, EINVAL,
3463 RTE_FLOW_ERROR_TYPE_ITEM,
3465 "Not support range");
3468 item_type = item->type;
3469 switch (item_type) {
3470 case RTE_FLOW_ITEM_TYPE_ETH:
3471 eth_spec = (const struct rte_flow_item_eth *)item->spec;
3472 eth_mask = (const struct rte_flow_item_eth *)item->mask;
3474 /* Check if ETH item is used for place holder.
3475 * If yes, both spec and mask should be NULL.
3476 * If no, both spec and mask shouldn't be NULL.
3478 if ((!eth_spec && eth_mask) ||
3479 (eth_spec && !eth_mask)) {
3480 rte_flow_error_set(error, EINVAL,
3481 RTE_FLOW_ERROR_TYPE_ITEM,
3483 "Invalid ether spec/mask");
3487 if (eth_spec && eth_mask) {
3488 /* DST address of inner MAC shouldn't be masked.
3489 * SRC address of Inner MAC should be masked.
/* NOTE(review): "ð_mask" below is mojibake — almost certainly a
 * corrupted "&eth_mask" (HTML entity &eth;). Restore before build. */
3491 if (!is_broadcast_ether_addr(ð_mask->dst) ||
3492 !is_zero_ether_addr(ð_mask->src) ||
3494 rte_flow_error_set(error, EINVAL,
3495 RTE_FLOW_ERROR_TYPE_ITEM,
3497 "Invalid ether spec/mask");
/* Before the NVGRE item, ETH describes the outer MAC;
 * after it (nvgre_flag set), it is the inner MAC. */
3502 rte_memcpy(&filter->outer_mac,
3505 filter_type |= ETH_TUNNEL_FILTER_OMAC;
3507 rte_memcpy(&filter->inner_mac,
3510 filter_type |= ETH_TUNNEL_FILTER_IMAC;
3515 case RTE_FLOW_ITEM_TYPE_VLAN:
3517 (const struct rte_flow_item_vlan *)item->spec;
3519 (const struct rte_flow_item_vlan *)item->mask;
/* VLAN item must provide both spec and mask. */
3520 if (!(vlan_spec && vlan_mask)) {
3521 rte_flow_error_set(error, EINVAL,
3522 RTE_FLOW_ERROR_TYPE_ITEM,
3524 "Invalid vlan item");
3528 if (vlan_spec && vlan_mask) {
/* Only a fully-masked TCI (I40E_TCI_MASK) selects the VLAN id. */
3529 if (vlan_mask->tci ==
3530 rte_cpu_to_be_16(I40E_TCI_MASK))
3531 filter->inner_vlan =
3532 rte_be_to_cpu_16(vlan_spec->tci) &
3534 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3537 case RTE_FLOW_ITEM_TYPE_IPV4:
3538 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3539 /* IPv4 is used to describe protocol,
3540 * spec and mask should be NULL.
3542 if (item->spec || item->mask) {
3543 rte_flow_error_set(error, EINVAL,
3544 RTE_FLOW_ERROR_TYPE_ITEM,
3546 "Invalid IPv4 item");
3550 case RTE_FLOW_ITEM_TYPE_IPV6:
3551 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3552 /* IPv6 is used to describe protocol,
3553 * spec and mask should be NULL.
3555 if (item->spec || item->mask) {
3556 rte_flow_error_set(error, EINVAL,
3557 RTE_FLOW_ERROR_TYPE_ITEM,
3559 "Invalid IPv6 item");
3563 case RTE_FLOW_ITEM_TYPE_NVGRE:
3565 (const struct rte_flow_item_nvgre *)item->spec;
3567 (const struct rte_flow_item_nvgre *)item->mask;
3568 /* Check if NVGRE item is used to describe protocol.
3569 * If yes, both spec and mask should be NULL.
3570 * If no, both spec and mask shouldn't be NULL.
3572 if ((!nvgre_spec && nvgre_mask) ||
3573 (nvgre_spec && !nvgre_mask)) {
3574 rte_flow_error_set(error, EINVAL,
3575 RTE_FLOW_ERROR_TYPE_ITEM,
3577 "Invalid NVGRE item");
3581 if (nvgre_spec && nvgre_mask) {
/* TNI mask must be all-ones (exact 24-bit match). */
3583 !!memcmp(nvgre_mask->tni, tni_mask,
3585 if (is_tni_masked) {
3586 rte_flow_error_set(error, EINVAL,
3587 RTE_FLOW_ERROR_TYPE_ITEM,
3589 "Invalid TNI mask");
/* If protocol is matched at all it must be matched fully. */
3592 if (nvgre_mask->protocol &&
3593 nvgre_mask->protocol != 0xFFFF) {
3594 rte_flow_error_set(error, EINVAL,
3595 RTE_FLOW_ERROR_TYPE_ITEM,
3597 "Invalid NVGRE item");
/* Same rule for the C/K/S/version header word. */
3600 if (nvgre_mask->c_k_s_rsvd0_ver &&
3601 nvgre_mask->c_k_s_rsvd0_ver !=
3602 rte_cpu_to_be_16(0xFFFF)) {
3603 rte_flow_error_set(error, EINVAL,
3604 RTE_FLOW_ERROR_TYPE_ITEM,
3606 "Invalid NVGRE item");
/* Only the key-present form (0x2000) is accepted. */
3609 if (nvgre_spec->c_k_s_rsvd0_ver !=
3610 rte_cpu_to_be_16(0x2000) &&
3611 nvgre_mask->c_k_s_rsvd0_ver) {
3612 rte_flow_error_set(error, EINVAL,
3613 RTE_FLOW_ERROR_TYPE_ITEM,
3615 "Invalid NVGRE item");
/* Protocol, when matched, must be Transparent Ethernet
 * Bridging (0x6558). */
3618 if (nvgre_mask->protocol &&
3619 nvgre_spec->protocol !=
3620 rte_cpu_to_be_16(0x6558)) {
3621 rte_flow_error_set(error, EINVAL,
3622 RTE_FLOW_ERROR_TYPE_ITEM,
3624 "Invalid NVGRE item");
/* Pack the 3-byte big-endian TNI into bits 23..0 of tenant_id. */
3627 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3628 nvgre_spec->tni, 3);
3630 rte_be_to_cpu_32(tenant_id_be);
3631 filter_type |= ETH_TUNNEL_FILTER_TENID;
/* Validate the accumulated OMAC/IMAC/IVLAN/TENID combination. */
3641 ret = i40e_check_tunnel_filter_type(filter_type);
3643 rte_flow_error_set(error, EINVAL,
3644 RTE_FLOW_ERROR_TYPE_ITEM,
3646 "Invalid filter type");
3649 filter->filter_type = filter_type;
3651 filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
/* Top-level parser for an NVGRE tunnel flow rule.
 * Same shape as the VXLAN variant: parse pattern, then actions,
 * then attributes, into filter->consistent_tunnel_filter, and mark
 * cons_filter_type as RTE_ETH_FILTER_TUNNEL.
 * NOTE(review): intermediate error-return lines are elided here. */
3657 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
3658 const struct rte_flow_attr *attr,
3659 const struct rte_flow_item pattern[],
3660 const struct rte_flow_action actions[],
3661 struct rte_flow_error *error,
3662 union i40e_filter_t *filter)
3664 struct i40e_tunnel_filter_conf *tunnel_filter =
3665 &filter->consistent_tunnel_filter;
3668 ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
3669 error, tunnel_filter);
3673 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3677 ret = i40e_flow_parse_attr(attr, error);
3681 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3686 /* 1. Last in item should be NULL as range is not supported.
3687 * 2. Supported filter types: MPLS label.
3688 * 3. Mask of fields which need to be matched should be
3690 * 4. Mask of fields which needn't be matched should be
/* Parse the pattern items of an MPLSoUDP / MPLSoGRE flow rule.
 * ETH/IPV4/IPV6/UDP/GRE items are protocol placeholders (spec and
 * mask must be NULL); the MPLS item supplies the 20-bit label used
 * as the tenant id. Whether a UDP or GRE item was seen decides
 * MPLSoUDP vs MPLSoGRE (see is_mplsoudp).
 * NOTE(review): error-return and `break` lines are elided in this
 * excerpt; each rte_flow_error_set() presumably returns. */
3694 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
3695 const struct rte_flow_item *pattern,
3696 struct rte_flow_error *error,
3697 struct i40e_tunnel_filter_conf *filter)
3699 const struct rte_flow_item *item = pattern;
3700 const struct rte_flow_item_mpls *mpls_spec;
3701 const struct rte_flow_item_mpls *mpls_mask;
3702 enum rte_flow_item_type item_type;
3703 bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
/* 20-bit label occupies the top 20 bits of label_tc_s[3]. */
3704 const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
3705 uint32_t label_be = 0;
3707 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Ranges (item->last) are not supported. */
3709 rte_flow_error_set(error, EINVAL,
3710 RTE_FLOW_ERROR_TYPE_ITEM,
3712 "Not support range");
3715 item_type = item->type;
3716 switch (item_type) {
3717 case RTE_FLOW_ITEM_TYPE_ETH:
3718 if (item->spec || item->mask) {
3719 rte_flow_error_set(error, EINVAL,
3720 RTE_FLOW_ERROR_TYPE_ITEM,
3722 "Invalid ETH item");
3726 case RTE_FLOW_ITEM_TYPE_IPV4:
3727 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3728 /* IPv4 is used to describe protocol,
3729 * spec and mask should be NULL.
3731 if (item->spec || item->mask) {
3732 rte_flow_error_set(error, EINVAL,
3733 RTE_FLOW_ERROR_TYPE_ITEM,
3735 "Invalid IPv4 item");
3739 case RTE_FLOW_ITEM_TYPE_IPV6:
3740 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3741 /* IPv6 is used to describe protocol,
3742 * spec and mask should be NULL.
3744 if (item->spec || item->mask) {
3745 rte_flow_error_set(error, EINVAL,
3746 RTE_FLOW_ERROR_TYPE_ITEM,
3748 "Invalid IPv6 item");
3752 case RTE_FLOW_ITEM_TYPE_UDP:
3753 /* UDP is used to describe protocol,
3754 * spec and mask should be NULL.
3756 if (item->spec || item->mask) {
3757 rte_flow_error_set(error, EINVAL,
3758 RTE_FLOW_ERROR_TYPE_ITEM,
3760 "Invalid UDP item");
3765 case RTE_FLOW_ITEM_TYPE_GRE:
3766 /* GRE is used to describe protocol,
3767 * spec and mask should be NULL.
3769 if (item->spec || item->mask) {
3770 rte_flow_error_set(error, EINVAL,
3771 RTE_FLOW_ERROR_TYPE_ITEM,
3773 "Invalid GRE item");
3777 case RTE_FLOW_ITEM_TYPE_MPLS:
3779 (const struct rte_flow_item_mpls *)item->spec;
3781 (const struct rte_flow_item_mpls *)item->mask;
/* MPLS item must carry both spec and mask. */
3783 if (!mpls_spec || !mpls_mask) {
3784 rte_flow_error_set(error, EINVAL,
3785 RTE_FLOW_ERROR_TYPE_ITEM,
3787 "Invalid MPLS item");
/* The full 20-bit label must be matched (mask FF FF F0). */
3791 if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
3792 rte_flow_error_set(error, EINVAL,
3793 RTE_FLOW_ERROR_TYPE_ITEM,
3795 "Invalid MPLS label mask");
/* Pack label_tc_s into a 32-bit BE value, then drop the
 * 4 TC/S bits so tenant_id holds just the label. */
3798 rte_memcpy(((uint8_t *)&label_be + 1),
3799 mpls_spec->label_tc_s, 3);
3800 filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
3808 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
3810 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
/* Top-level parser for an MPLSoUDP/MPLSoGRE flow rule: pattern,
 * actions, attributes, then mark cons_filter_type as TUNNEL.
 * NOTE(review): intermediate error-return lines are elided here. */
3816 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
3817 const struct rte_flow_attr *attr,
3818 const struct rte_flow_item pattern[],
3819 const struct rte_flow_action actions[],
3820 struct rte_flow_error *error,
3821 union i40e_filter_t *filter)
3823 struct i40e_tunnel_filter_conf *tunnel_filter =
3824 &filter->consistent_tunnel_filter;
3827 ret = i40e_flow_parse_mpls_pattern(dev, pattern,
3828 error, tunnel_filter);
3832 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3836 ret = i40e_flow_parse_attr(attr, error);
3840 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3845 /* 1. Last in item should be NULL as range is not supported.
3846 * 2. Supported filter types: GTP TEID.
3847 * 3. Mask of fields which need to be matched should be
3849 * 4. Mask of fields which needn't be matched should be
3851 * 5. GTP profile supports GTPv1 only.
3852 * 6. GTP-C response message ('source_port' = 2123) is not supported.
/* Parse the pattern items of a GTP-C / GTP-U flow rule.
 * Requires the GTP customized profile (pf->gtp_support); otherwise
 * rejected up front. ETH/IPV4/UDP items are protocol placeholders
 * (spec/mask NULL). The GTPC/GTPU item must match the full 32-bit
 * TEID and nothing else; the TEID becomes the tenant id.
 * NOTE(review): error-return and `break` lines are elided in this
 * excerpt; each rte_flow_error_set() presumably returns. */
3855 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
3856 const struct rte_flow_item *pattern,
3857 struct rte_flow_error *error,
3858 struct i40e_tunnel_filter_conf *filter)
3860 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3861 const struct rte_flow_item *item = pattern;
3862 const struct rte_flow_item_gtp *gtp_spec;
3863 const struct rte_flow_item_gtp *gtp_mask;
3864 enum rte_flow_item_type item_type;
/* GTP filtering needs the pctype-mapping profile loaded. */
3866 if (!pf->gtp_support) {
3867 rte_flow_error_set(error, EINVAL,
3868 RTE_FLOW_ERROR_TYPE_ITEM,
3870 "GTP is not supported by default.");
3874 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Ranges (item->last) are not supported. */
3876 rte_flow_error_set(error, EINVAL,
3877 RTE_FLOW_ERROR_TYPE_ITEM,
3879 "Not support range");
3882 item_type = item->type;
3883 switch (item_type) {
3884 case RTE_FLOW_ITEM_TYPE_ETH:
3885 if (item->spec || item->mask) {
3886 rte_flow_error_set(error, EINVAL,
3887 RTE_FLOW_ERROR_TYPE_ITEM,
3889 "Invalid ETH item");
3893 case RTE_FLOW_ITEM_TYPE_IPV4:
3894 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3895 /* IPv4 is used to describe protocol,
3896 * spec and mask should be NULL.
3898 if (item->spec || item->mask) {
3899 rte_flow_error_set(error, EINVAL,
3900 RTE_FLOW_ERROR_TYPE_ITEM,
3902 "Invalid IPv4 item");
3906 case RTE_FLOW_ITEM_TYPE_UDP:
3907 if (item->spec || item->mask) {
3908 rte_flow_error_set(error, EINVAL,
3909 RTE_FLOW_ERROR_TYPE_ITEM,
3911 "Invalid UDP item");
3915 case RTE_FLOW_ITEM_TYPE_GTPC:
3916 case RTE_FLOW_ITEM_TYPE_GTPU:
3918 (const struct rte_flow_item_gtp *)item->spec;
3920 (const struct rte_flow_item_gtp *)item->mask;
/* GTP item must carry both spec and mask. */
3922 if (!gtp_spec || !gtp_mask) {
3923 rte_flow_error_set(error, EINVAL,
3924 RTE_FLOW_ERROR_TYPE_ITEM,
3926 "Invalid GTP item");
/* Only the TEID may be matched, and it must be matched
 * in full (mask == UINT32_MAX). */
3930 if (gtp_mask->v_pt_rsv_flags ||
3931 gtp_mask->msg_type ||
3932 gtp_mask->msg_len ||
3933 gtp_mask->teid != UINT32_MAX) {
3934 rte_flow_error_set(error, EINVAL,
3935 RTE_FLOW_ERROR_TYPE_ITEM,
3937 "Invalid GTP mask");
3941 if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
3942 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
3943 else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
3944 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
/* TEID doubles as the tenant id for the cloud filter. */
3946 filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
/* Top-level parser for a GTP-C/GTP-U flow rule: pattern, actions,
 * attributes, then mark cons_filter_type as TUNNEL.
 * NOTE(review): intermediate error-return lines are elided here. */
3958 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
3959 const struct rte_flow_attr *attr,
3960 const struct rte_flow_item pattern[],
3961 const struct rte_flow_action actions[],
3962 struct rte_flow_error *error,
3963 union i40e_filter_t *filter)
3965 struct i40e_tunnel_filter_conf *tunnel_filter =
3966 &filter->consistent_tunnel_filter;
3969 ret = i40e_flow_parse_gtp_pattern(dev, pattern,
3970 error, tunnel_filter);
3974 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3978 ret = i40e_flow_parse_attr(attr, error);
3982 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
3987 /* 1. Last in item should be NULL as range is not supported.
3988 * 2. Supported filter types: QINQ.
3989 * 3. Mask of fields which need to be matched should be
3991 * 4. Mask of fields which needn't be matched should be
/* Parse the pattern items of a QinQ (double-VLAN) flow rule.
 * Expects ETH as a placeholder plus two VLAN items; the first VLAN
 * seen is the outer tag, the second the inner tag (see the
 * o_vlan_* / i_vlan_* assignments). Both TCIs must be matched with
 * the full I40E_TCI_MASK.
 * NOTE(review): error-return/`break` lines and the ordering logic
 * between the outer/inner assignments are partly elided here. */
3995 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
3996 const struct rte_flow_item *pattern,
3997 struct rte_flow_error *error,
3998 struct i40e_tunnel_filter_conf *filter)
4000 const struct rte_flow_item *item = pattern;
4001 const struct rte_flow_item_vlan *vlan_spec = NULL;
4002 const struct rte_flow_item_vlan *vlan_mask = NULL;
4003 const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4004 const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4005 const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4006 const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4008 enum rte_flow_item_type item_type;
4011 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Ranges (item->last) are not supported. */
4013 rte_flow_error_set(error, EINVAL,
4014 RTE_FLOW_ERROR_TYPE_ITEM,
4016 "Not support range");
4019 item_type = item->type;
4020 switch (item_type) {
4021 case RTE_FLOW_ITEM_TYPE_ETH:
4022 if (item->spec || item->mask) {
4023 rte_flow_error_set(error, EINVAL,
4024 RTE_FLOW_ERROR_TYPE_ITEM,
4026 "Invalid ETH item");
4030 case RTE_FLOW_ITEM_TYPE_VLAN:
4032 (const struct rte_flow_item_vlan *)item->spec;
4034 (const struct rte_flow_item_vlan *)item->mask;
4036 if (!(vlan_spec && vlan_mask)) {
4037 rte_flow_error_set(error, EINVAL,
4038 RTE_FLOW_ERROR_TYPE_ITEM,
4040 "Invalid vlan item");
/* First VLAN item -> outer tag; second -> inner tag. */
4045 o_vlan_spec = vlan_spec;
4046 o_vlan_mask = vlan_mask;
4049 i_vlan_spec = vlan_spec;
4050 i_vlan_mask = vlan_mask;
4060 /* Get filter specification */
/* Both TCIs must be fully masked to extract the VLAN ids. */
4061 if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4062 rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4063 (i_vlan_mask != NULL) &&
4064 (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4065 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4067 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4070 rte_flow_error_set(error, EINVAL,
4071 RTE_FLOW_ERROR_TYPE_ITEM,
4073 "Invalid filter type");
4077 filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
/* Top-level parser for a QinQ flow rule: pattern, actions,
 * attributes, then mark cons_filter_type as TUNNEL.
 * NOTE(review): intermediate error-return lines are elided here. */
4082 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4083 const struct rte_flow_attr *attr,
4084 const struct rte_flow_item pattern[],
4085 const struct rte_flow_action actions[],
4086 struct rte_flow_error *error,
4087 union i40e_filter_t *filter)
4089 struct i40e_tunnel_filter_conf *tunnel_filter =
4090 &filter->consistent_tunnel_filter;
4093 ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4094 error, tunnel_filter);
4098 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4102 ret = i40e_flow_parse_attr(attr, error);
4106 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
/* rte_flow .validate callback: check a rule without programming it.
 * Rejects NULL pattern/actions/attr, strips VOID items into a
 * temporary array, then tries each matching parse function from
 * i40e_supported_patterns until one accepts the rule. On success
 * the parsed result is left in the global cons_filter /
 * cons_filter_type for a subsequent i40e_flow_create().
 * NOTE(review): early `return -rte_errno;` lines and the loop
 * counters (i, flag) declarations are elided in this excerpt. */
4112 i40e_flow_validate(struct rte_eth_dev *dev,
4113 const struct rte_flow_attr *attr,
4114 const struct rte_flow_item pattern[],
4115 const struct rte_flow_action actions[],
4116 struct rte_flow_error *error)
4118 struct rte_flow_item *items; /* internal pattern w/o VOID items */
4119 parse_filter_t parse_filter;
4120 uint32_t item_num = 0; /* non-void item number of pattern*/
4123 int ret = I40E_NOT_SUPPORTED;
4126 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4127 NULL, "NULL pattern.");
4132 rte_flow_error_set(error, EINVAL,
4133 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
4134 NULL, "NULL action.");
4139 rte_flow_error_set(error, EINVAL,
4140 RTE_FLOW_ERROR_TYPE_ATTR,
4141 NULL, "NULL attribute.");
/* Reset the shared parse result before parsing. */
4145 memset(&cons_filter, 0, sizeof(cons_filter));
4147 /* Get the non-void item number of pattern */
4148 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4149 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Scratch copy of the pattern with VOID items removed. */
4155 items = rte_zmalloc("i40e_pattern",
4156 item_num * sizeof(struct rte_flow_item), 0);
4158 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4159 NULL, "No memory for PMD internal items.");
4163 i40e_pattern_skip_void_item(items, pattern);
/* Try each candidate parser until one accepts the rule. */
4167 parse_filter = i40e_find_parse_filter_func(items, &i);
4168 if (!parse_filter && !flag) {
4169 rte_flow_error_set(error, EINVAL,
4170 RTE_FLOW_ERROR_TYPE_ITEM,
4171 pattern, "Unsupported pattern");
4176 ret = parse_filter(dev, attr, items, actions,
4177 error, &cons_filter);
4179 } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
/* rte_flow .create callback: validate the rule, program the HW
 * filter matching cons_filter_type (ethertype / FDIR / tunnel),
 * link the newly-added SW list node into flow->rule, and append the
 * flow to pf->flow_list. Returns the flow handle, or NULL with
 * `error` set on failure.
 * NOTE(review): the error-unwind lines (free_flow label, freeing
 * the zmalloc'd flow on failure) are elided in this excerpt. */
4186 static struct rte_flow *
4187 i40e_flow_create(struct rte_eth_dev *dev,
4188 const struct rte_flow_attr *attr,
4189 const struct rte_flow_item pattern[],
4190 const struct rte_flow_action actions[],
4191 struct rte_flow_error *error)
4193 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4194 struct rte_flow *flow;
4197 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
4199 rte_flow_error_set(error, ENOMEM,
4200 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4201 "Failed to allocate memory");
/* Re-run validation; it also fills cons_filter/cons_filter_type. */
4205 ret = i40e_flow_validate(dev, attr, pattern, actions, error);
4209 switch (cons_filter_type) {
4210 case RTE_ETH_FILTER_ETHERTYPE:
4211 ret = i40e_ethertype_filter_set(pf,
4212 &cons_filter.ethertype_filter, 1);
/* The setter appends to the SW list; the new node is the tail. */
4215 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
4216 i40e_ethertype_filter_list);
4218 case RTE_ETH_FILTER_FDIR:
4219 ret = i40e_flow_add_del_fdir_filter(dev,
4220 &cons_filter.fdir_filter, 1);
4223 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
4224 i40e_fdir_filter_list);
4226 case RTE_ETH_FILTER_TUNNEL:
4227 ret = i40e_dev_consistent_tunnel_filter_set(pf,
4228 &cons_filter.consistent_tunnel_filter, 1);
4231 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
4232 i40e_tunnel_filter_list);
4238 flow->filter_type = cons_filter_type;
4239 TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
4243 rte_flow_error_set(error, -ret,
4244 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4245 "Failed to create flow.");
/* rte_flow .destroy callback: remove one flow's HW filter according
 * to its recorded filter_type, then unlink and (presumably, in the
 * elided lines) free the flow from pf->flow_list. Unsupported types
 * only log a warning.
 * NOTE(review): the `rte_free(flow)` and final return lines are
 * elided in this excerpt — confirm upstream. */
4251 i40e_flow_destroy(struct rte_eth_dev *dev,
4252 struct rte_flow *flow,
4253 struct rte_flow_error *error)
4255 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4256 enum rte_filter_type filter_type = flow->filter_type;
4259 switch (filter_type) {
4260 case RTE_ETH_FILTER_ETHERTYPE:
4261 ret = i40e_flow_destroy_ethertype_filter(pf,
4262 (struct i40e_ethertype_filter *)flow->rule);
4264 case RTE_ETH_FILTER_TUNNEL:
4265 ret = i40e_flow_destroy_tunnel_filter(pf,
4266 (struct i40e_tunnel_filter *)flow->rule);
4268 case RTE_ETH_FILTER_FDIR:
/* FDIR reuses add/del with add=0 to delete. */
4269 ret = i40e_flow_add_del_fdir_filter(dev,
4270 &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
4273 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
4280 TAILQ_REMOVE(&pf->flow_list, flow, node);
4283 rte_flow_error_set(error, -ret,
4284 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4285 "Failed to destroy flow.");
/* Remove one ethertype filter: rebuild the AQ control-packet filter
 * flags from the stored rule, ask FW to remove it (the final 0 arg
 * to i40e_aq_add_rem_control_packet_filter selects "remove"), then
 * drop the matching node from the PMD's SW shadow list.
 * NOTE(review): `flags` declaration and error-return lines are
 * elided in this excerpt. */
4291 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
4292 struct i40e_ethertype_filter *filter)
4294 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4295 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
4296 struct i40e_ethertype_filter *node;
4297 struct i40e_control_filter_stats stats;
/* Rebuild the same AQ flags the filter was added with. */
4301 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
4302 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
4303 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
4304 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
4305 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
4307 memset(&stats, 0, sizeof(stats));
4308 ret = i40e_aq_add_rem_control_packet_filter(hw,
4309 filter->input.mac_addr.addr_bytes,
4310 filter->input.ether_type,
4311 flags, pf->main_vsi->seid,
4312 filter->queue, 0, &stats, NULL);
/* Keep the SW shadow list in sync with HW. */
4316 node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
4320 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
/* Remove one cloud (tunnel) filter: rebuild the AQ cloud-filter
 * element from the stored rule, pick the PF's main VSI or the
 * target VF's VSI, choose the big-buffer AQ variant for the
 * customized 0x10/0x11/0x12 filter types, remove it in FW, then
 * drop the matching node from the PMD's SW shadow list.
 * NOTE(review): the `vsi = pf->main_vsi` branch and error-return
 * lines are elided in this excerpt. */
4326 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
4327 struct i40e_tunnel_filter *filter)
4329 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
4330 struct i40e_vsi *vsi;
4331 struct i40e_pf_vf *vf;
4332 struct i40e_aqc_add_rm_cloud_filt_elem_ext cld_filter;
4333 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
4334 struct i40e_tunnel_filter *node;
4335 bool big_buffer = 0;
/* Mirror the stored rule into the AQ element format. */
4338 memset(&cld_filter, 0, sizeof(cld_filter));
4339 ether_addr_copy((struct ether_addr *)&filter->input.outer_mac,
4340 (struct ether_addr *)&cld_filter.element.outer_mac);
4341 ether_addr_copy((struct ether_addr *)&filter->input.inner_mac,
4342 (struct ether_addr *)&cld_filter.element.inner_mac);
4343 cld_filter.element.inner_vlan = filter->input.inner_vlan;
4344 cld_filter.element.flags = filter->input.flags;
4345 cld_filter.element.tenant_id = filter->input.tenant_id;
4346 cld_filter.element.queue_number = filter->queue;
4347 rte_memcpy(cld_filter.general_fields,
4348 filter->input.general_fields,
4349 sizeof(cld_filter.general_fields));
/* Rule may target the PF's main VSI or a VF's VSI. */
4351 if (!filter->is_to_vf)
4354 vf = &pf->vfs[filter->vf_id];
/* Customized filter types 0x10/0x11/0x12 need the big-buffer
 * variant of the remove AQ command. */
4358 if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
4359 I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
4360 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
4361 I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
4362 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
4363 I40E_AQC_ADD_CLOUD_FILTER_0X10))
4367 ret = i40e_aq_remove_cloud_filters_big_buffer(hw, vsi->seid,
4370 ret = i40e_aq_remove_cloud_filters(hw, vsi->seid,
4371 &cld_filter.element, 1);
/* Keep the SW shadow list in sync with HW. */
4375 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
4379 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
/* rte_flow .flush callback: remove every flow on the port by
 * flushing, in order, FDIR, ethertype, and tunnel filters. Each
 * stage sets `error` and (in elided lines) presumably returns on
 * failure, so a partial flush is reported to the caller. */
4385 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
4387 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4390 ret = i40e_flow_flush_fdir_filter(pf);
4392 rte_flow_error_set(error, -ret,
4393 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4394 "Failed to flush FDIR flows.");
4398 ret = i40e_flow_flush_ethertype_filter(pf);
4400 rte_flow_error_set(error, -ret,
4401 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4402 "Failed to ethertype flush flows.");
4406 ret = i40e_flow_flush_tunnel_filter(pf);
4408 rte_flow_error_set(error, -ret,
4409 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
4410 "Failed to flush tunnel flows.");
/* Flush all flow-director filters: clear the HW FDIR table via
 * i40e_fdir_flush(), drain the SW FDIR list, remove FDIR-typed
 * flows from pf->flow_list (freeing elided in this excerpt), and
 * reset the per-pctype input-set flags so future rules can
 * reprogram the input set. */
4418 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
4420 struct rte_eth_dev *dev = pf->adapter->eth_dev;
4421 struct i40e_fdir_info *fdir_info = &pf->fdir;
4422 struct i40e_fdir_filter *fdir_filter;
4423 enum i40e_filter_pctype pctype;
4424 struct rte_flow *flow;
4428 ret = i40e_fdir_flush(dev);
4430 /* Delete FDIR filters in FDIR list. */
4431 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
4432 ret = i40e_sw_fdir_filter_del(pf,
4433 &fdir_filter->fdir.input);
4438 /* Delete FDIR flows in flow list. */
4439 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4440 if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
4441 TAILQ_REMOVE(&pf->flow_list, flow, node);
/* Forget configured input sets for every FDIR pctype. */
4446 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
4447 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
4448 pf->fdir.inset_flag[pctype] = 0;
4454 /* Flush all ethertype filters */
/* Flush all ethertype filters: destroy each SW-listed filter (which
 * also removes it from HW via i40e_flow_destroy_ethertype_filter),
 * then remove ethertype-typed flows from pf->flow_list (the
 * rte_free(flow) and return lines are elided in this excerpt). */
4456 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
4458 struct i40e_ethertype_filter_list
4459 *ethertype_list = &pf->ethertype.ethertype_list;
4460 struct i40e_ethertype_filter *filter;
4461 struct rte_flow *flow;
4465 while ((filter = TAILQ_FIRST(ethertype_list))) {
4466 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
4471 /* Delete ethertype flows in flow list. */
4472 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4473 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
4474 TAILQ_REMOVE(&pf->flow_list, flow, node);
4482 /* Flush all tunnel filters */
4484 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
4486 struct i40e_tunnel_filter_list
4487 *tunnel_list = &pf->tunnel.tunnel_list;
4488 struct i40e_tunnel_filter *filter;
4489 struct rte_flow *flow;
4493 while ((filter = TAILQ_FIRST(tunnel_list))) {
4494 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
4499 /* Delete tunnel flows in flow list. */
4500 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
4501 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
4502 TAILQ_REMOVE(&pf->flow_list, flow, node);