1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2016-2017 Intel Corporation
13 #include <rte_debug.h>
14 #include <rte_ether.h>
15 #include <rte_ethdev_driver.h>
17 #include <rte_malloc.h>
18 #include <rte_tailq.h>
19 #include <rte_flow_driver.h>
20 #include <rte_bitmap.h>
22 #include "i40e_logs.h"
23 #include "base/i40e_type.h"
24 #include "base/i40e_prototype.h"
25 #include "i40e_ethdev.h"
/* Mask covering the 8-bit Traffic Class field of the IPv6 version/TC/
 * flow-label word.  NOTE(review): 0xFF is a signed int; the shifted
 * result fits in int for the current TC offset, but confirm
 * I40E_FDIR_IPv6_TC_OFFSET stays below 24 if it ever changes.
 */
#define I40E_IPV6_TC_MASK (0xFF << I40E_FDIR_IPv6_TC_OFFSET)
/* IPv6 next-header protocol number of the fragment extension header. */
#define I40E_IPV6_FRAG_HEADER 44
/* Size of the tenant-id byte array handled by tunnel filter parsing
 * — presumably the 24-bit VNI/TNI split into 3 bytes; TODO confirm
 * against the tunnel parse helpers (outside this chunk).
 */
#define I40E_TENANT_ARRAY_NUM 3
/* Match the full 16-bit VLAN TCI (PCP + DEI + VLAN ID). */
#define I40E_TCI_MASK 0xFFFF
/* Forward declarations of the generic rte_flow operation callbacks
 * registered in i40e_flow_ops below (validate / create / destroy /
 * flush / query).  Bodies are defined later in this file.
 */
static int i40e_flow_validate(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      const struct rte_flow_item pattern[],
			      const struct rte_flow_action actions[],
			      struct rte_flow_error *error);
static struct rte_flow *i40e_flow_create(struct rte_eth_dev *dev,
					 const struct rte_flow_attr *attr,
					 const struct rte_flow_item pattern[],
					 const struct rte_flow_action actions[],
					 struct rte_flow_error *error);
static int i40e_flow_destroy(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     struct rte_flow_error *error);
static int i40e_flow_flush(struct rte_eth_dev *dev,
			   struct rte_flow_error *error);
static int i40e_flow_query(struct rte_eth_dev *dev,
			   struct rte_flow *flow,
			   const struct rte_flow_action *actions,
			   void *data, struct rte_flow_error *error);
52 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
53 const struct rte_flow_item *pattern,
54 struct rte_flow_error *error,
55 struct rte_eth_ethertype_filter *filter);
/* Forward declarations of the per-filter-type helpers.
 *
 * NOTE(review): roles inferred from naming — bodies are outside this
 * chunk; confirm against the definitions.  The *_parse_*_pattern /
 * *_parse_*_action helpers appear to translate an rte_flow item or
 * action list into a driver-private filter configuration; the
 * *_parse_*_filter helpers combine attribute, pattern and action
 * parsing for one filter class into a union i40e_filter_t; the
 * destroy/flush helpers remove previously programmed filters.
 */
static int i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error,
				struct rte_eth_ethertype_filter *filter);
static int i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item *pattern,
				struct rte_flow_error *error,
				struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error,
				struct i40e_fdir_filter_conf *filter);
static int i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
				const struct rte_flow_action *actions,
				struct rte_flow_error *error,
				struct i40e_tunnel_filter_conf *filter);
static int i40e_flow_parse_attr(const struct rte_flow_attr *attr,
				struct rte_flow_error *error);
static int i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
static int i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
				struct i40e_ethertype_filter *filter);
static int i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
				struct i40e_tunnel_filter *filter);
static int i40e_flow_flush_fdir_filter(struct i40e_pf *pf);
static int i40e_flow_flush_ethertype_filter(struct i40e_pf *pf);
static int i40e_flow_flush_tunnel_filter(struct i40e_pf *pf);
static int i40e_flow_flush_rss_filter(struct rte_eth_dev *dev);
120 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
121 const struct rte_flow_attr *attr,
122 const struct rte_flow_item pattern[],
123 const struct rte_flow_action actions[],
124 struct rte_flow_error *error,
125 union i40e_filter_t *filter);
127 i40e_flow_parse_qinq_pattern(struct rte_eth_dev *dev,
128 const struct rte_flow_item *pattern,
129 struct rte_flow_error *error,
130 struct i40e_tunnel_filter_conf *filter);
/* Cloud filter variant matching on outer L4 fields — NOTE(review):
 * inferred from the name; body is outside this chunk, confirm there.
 */
static int i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				const struct rte_flow_item pattern[],
				const struct rte_flow_action actions[],
				struct rte_flow_error *error,
				union i40e_filter_t *filter);
/* rte_flow driver callback table exported to the generic flow API
 * layer; the ethdev layer returns it from the driver's
 * filter/flow-ops hook.
 */
const struct rte_flow_ops i40e_flow_ops = {
	.validate = i40e_flow_validate,
	.create = i40e_flow_create,
	.destroy = i40e_flow_destroy,
	.flush = i40e_flow_flush,
	.query = i40e_flow_query,
/* Scratch filter filled in during parsing; cons_filter_type records
 * which union member is valid (RTE_ETH_FILTER_NONE = nothing parsed).
 * NOTE(review): file-scope mutable state shared by validate/create —
 * not safe if flows are created concurrently on multiple ports;
 * confirm callers serialize flow operations.
 */
static union i40e_filter_t cons_filter;
static enum rte_filter_type cons_filter_type = RTE_ETH_FILTER_NONE;
/* Pattern matched ethertype filter: a bare Ethernet header only. */
static enum rte_flow_item_type pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
/* Patterns matched by the flow director (FDIR) filter.  Each table
 * lists the item chain from the outer Ethernet header inwards and is
 * terminated by RTE_FLOW_ITEM_TYPE_END.
 */
static enum rte_flow_item_type pattern_fdir_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
162 static enum rte_flow_item_type pattern_fdir_ipv4_udp[] = {
163 RTE_FLOW_ITEM_TYPE_ETH,
164 RTE_FLOW_ITEM_TYPE_IPV4,
165 RTE_FLOW_ITEM_TYPE_UDP,
166 RTE_FLOW_ITEM_TYPE_END,
169 static enum rte_flow_item_type pattern_fdir_ipv4_tcp[] = {
170 RTE_FLOW_ITEM_TYPE_ETH,
171 RTE_FLOW_ITEM_TYPE_IPV4,
172 RTE_FLOW_ITEM_TYPE_TCP,
173 RTE_FLOW_ITEM_TYPE_END,
176 static enum rte_flow_item_type pattern_fdir_ipv4_sctp[] = {
177 RTE_FLOW_ITEM_TYPE_ETH,
178 RTE_FLOW_ITEM_TYPE_IPV4,
179 RTE_FLOW_ITEM_TYPE_SCTP,
180 RTE_FLOW_ITEM_TYPE_END,
183 static enum rte_flow_item_type pattern_fdir_ipv4_gtpc[] = {
184 RTE_FLOW_ITEM_TYPE_ETH,
185 RTE_FLOW_ITEM_TYPE_IPV4,
186 RTE_FLOW_ITEM_TYPE_UDP,
187 RTE_FLOW_ITEM_TYPE_GTPC,
188 RTE_FLOW_ITEM_TYPE_END,
191 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu[] = {
192 RTE_FLOW_ITEM_TYPE_ETH,
193 RTE_FLOW_ITEM_TYPE_IPV4,
194 RTE_FLOW_ITEM_TYPE_UDP,
195 RTE_FLOW_ITEM_TYPE_GTPU,
196 RTE_FLOW_ITEM_TYPE_END,
199 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv4[] = {
200 RTE_FLOW_ITEM_TYPE_ETH,
201 RTE_FLOW_ITEM_TYPE_IPV4,
202 RTE_FLOW_ITEM_TYPE_UDP,
203 RTE_FLOW_ITEM_TYPE_GTPU,
204 RTE_FLOW_ITEM_TYPE_IPV4,
205 RTE_FLOW_ITEM_TYPE_END,
208 static enum rte_flow_item_type pattern_fdir_ipv4_gtpu_ipv6[] = {
209 RTE_FLOW_ITEM_TYPE_ETH,
210 RTE_FLOW_ITEM_TYPE_IPV4,
211 RTE_FLOW_ITEM_TYPE_UDP,
212 RTE_FLOW_ITEM_TYPE_GTPU,
213 RTE_FLOW_ITEM_TYPE_IPV6,
214 RTE_FLOW_ITEM_TYPE_END,
217 static enum rte_flow_item_type pattern_fdir_ipv6[] = {
218 RTE_FLOW_ITEM_TYPE_ETH,
219 RTE_FLOW_ITEM_TYPE_IPV6,
220 RTE_FLOW_ITEM_TYPE_END,
223 static enum rte_flow_item_type pattern_fdir_ipv6_udp[] = {
224 RTE_FLOW_ITEM_TYPE_ETH,
225 RTE_FLOW_ITEM_TYPE_IPV6,
226 RTE_FLOW_ITEM_TYPE_UDP,
227 RTE_FLOW_ITEM_TYPE_END,
230 static enum rte_flow_item_type pattern_fdir_ipv6_tcp[] = {
231 RTE_FLOW_ITEM_TYPE_ETH,
232 RTE_FLOW_ITEM_TYPE_IPV6,
233 RTE_FLOW_ITEM_TYPE_TCP,
234 RTE_FLOW_ITEM_TYPE_END,
237 static enum rte_flow_item_type pattern_fdir_ipv6_sctp[] = {
238 RTE_FLOW_ITEM_TYPE_ETH,
239 RTE_FLOW_ITEM_TYPE_IPV6,
240 RTE_FLOW_ITEM_TYPE_SCTP,
241 RTE_FLOW_ITEM_TYPE_END,
244 static enum rte_flow_item_type pattern_fdir_ipv6_gtpc[] = {
245 RTE_FLOW_ITEM_TYPE_ETH,
246 RTE_FLOW_ITEM_TYPE_IPV6,
247 RTE_FLOW_ITEM_TYPE_UDP,
248 RTE_FLOW_ITEM_TYPE_GTPC,
249 RTE_FLOW_ITEM_TYPE_END,
252 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu[] = {
253 RTE_FLOW_ITEM_TYPE_ETH,
254 RTE_FLOW_ITEM_TYPE_IPV6,
255 RTE_FLOW_ITEM_TYPE_UDP,
256 RTE_FLOW_ITEM_TYPE_GTPU,
257 RTE_FLOW_ITEM_TYPE_END,
260 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv4[] = {
261 RTE_FLOW_ITEM_TYPE_ETH,
262 RTE_FLOW_ITEM_TYPE_IPV6,
263 RTE_FLOW_ITEM_TYPE_UDP,
264 RTE_FLOW_ITEM_TYPE_GTPU,
265 RTE_FLOW_ITEM_TYPE_IPV4,
266 RTE_FLOW_ITEM_TYPE_END,
269 static enum rte_flow_item_type pattern_fdir_ipv6_gtpu_ipv6[] = {
270 RTE_FLOW_ITEM_TYPE_ETH,
271 RTE_FLOW_ITEM_TYPE_IPV6,
272 RTE_FLOW_ITEM_TYPE_UDP,
273 RTE_FLOW_ITEM_TYPE_GTPU,
274 RTE_FLOW_ITEM_TYPE_IPV6,
275 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns with trailing RAW items (flexible payload matching);
 * the _1/_2/_3 suffix is the number of RAW items in the chain.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
284 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2[] = {
285 RTE_FLOW_ITEM_TYPE_ETH,
286 RTE_FLOW_ITEM_TYPE_RAW,
287 RTE_FLOW_ITEM_TYPE_RAW,
288 RTE_FLOW_ITEM_TYPE_END,
291 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3[] = {
292 RTE_FLOW_ITEM_TYPE_ETH,
293 RTE_FLOW_ITEM_TYPE_RAW,
294 RTE_FLOW_ITEM_TYPE_RAW,
295 RTE_FLOW_ITEM_TYPE_RAW,
296 RTE_FLOW_ITEM_TYPE_END,
299 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1[] = {
300 RTE_FLOW_ITEM_TYPE_ETH,
301 RTE_FLOW_ITEM_TYPE_IPV4,
302 RTE_FLOW_ITEM_TYPE_RAW,
303 RTE_FLOW_ITEM_TYPE_END,
306 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2[] = {
307 RTE_FLOW_ITEM_TYPE_ETH,
308 RTE_FLOW_ITEM_TYPE_IPV4,
309 RTE_FLOW_ITEM_TYPE_RAW,
310 RTE_FLOW_ITEM_TYPE_RAW,
311 RTE_FLOW_ITEM_TYPE_END,
314 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3[] = {
315 RTE_FLOW_ITEM_TYPE_ETH,
316 RTE_FLOW_ITEM_TYPE_IPV4,
317 RTE_FLOW_ITEM_TYPE_RAW,
318 RTE_FLOW_ITEM_TYPE_RAW,
319 RTE_FLOW_ITEM_TYPE_RAW,
320 RTE_FLOW_ITEM_TYPE_END,
323 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1[] = {
324 RTE_FLOW_ITEM_TYPE_ETH,
325 RTE_FLOW_ITEM_TYPE_IPV4,
326 RTE_FLOW_ITEM_TYPE_UDP,
327 RTE_FLOW_ITEM_TYPE_RAW,
328 RTE_FLOW_ITEM_TYPE_END,
331 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2[] = {
332 RTE_FLOW_ITEM_TYPE_ETH,
333 RTE_FLOW_ITEM_TYPE_IPV4,
334 RTE_FLOW_ITEM_TYPE_UDP,
335 RTE_FLOW_ITEM_TYPE_RAW,
336 RTE_FLOW_ITEM_TYPE_RAW,
337 RTE_FLOW_ITEM_TYPE_END,
340 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3[] = {
341 RTE_FLOW_ITEM_TYPE_ETH,
342 RTE_FLOW_ITEM_TYPE_IPV4,
343 RTE_FLOW_ITEM_TYPE_UDP,
344 RTE_FLOW_ITEM_TYPE_RAW,
345 RTE_FLOW_ITEM_TYPE_RAW,
346 RTE_FLOW_ITEM_TYPE_RAW,
347 RTE_FLOW_ITEM_TYPE_END,
350 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1[] = {
351 RTE_FLOW_ITEM_TYPE_ETH,
352 RTE_FLOW_ITEM_TYPE_IPV4,
353 RTE_FLOW_ITEM_TYPE_TCP,
354 RTE_FLOW_ITEM_TYPE_RAW,
355 RTE_FLOW_ITEM_TYPE_END,
358 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2[] = {
359 RTE_FLOW_ITEM_TYPE_ETH,
360 RTE_FLOW_ITEM_TYPE_IPV4,
361 RTE_FLOW_ITEM_TYPE_TCP,
362 RTE_FLOW_ITEM_TYPE_RAW,
363 RTE_FLOW_ITEM_TYPE_RAW,
364 RTE_FLOW_ITEM_TYPE_END,
367 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3[] = {
368 RTE_FLOW_ITEM_TYPE_ETH,
369 RTE_FLOW_ITEM_TYPE_IPV4,
370 RTE_FLOW_ITEM_TYPE_TCP,
371 RTE_FLOW_ITEM_TYPE_RAW,
372 RTE_FLOW_ITEM_TYPE_RAW,
373 RTE_FLOW_ITEM_TYPE_RAW,
374 RTE_FLOW_ITEM_TYPE_END,
377 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1[] = {
378 RTE_FLOW_ITEM_TYPE_ETH,
379 RTE_FLOW_ITEM_TYPE_IPV4,
380 RTE_FLOW_ITEM_TYPE_SCTP,
381 RTE_FLOW_ITEM_TYPE_RAW,
382 RTE_FLOW_ITEM_TYPE_END,
385 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2[] = {
386 RTE_FLOW_ITEM_TYPE_ETH,
387 RTE_FLOW_ITEM_TYPE_IPV4,
388 RTE_FLOW_ITEM_TYPE_SCTP,
389 RTE_FLOW_ITEM_TYPE_RAW,
390 RTE_FLOW_ITEM_TYPE_RAW,
391 RTE_FLOW_ITEM_TYPE_END,
394 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3[] = {
395 RTE_FLOW_ITEM_TYPE_ETH,
396 RTE_FLOW_ITEM_TYPE_IPV4,
397 RTE_FLOW_ITEM_TYPE_SCTP,
398 RTE_FLOW_ITEM_TYPE_RAW,
399 RTE_FLOW_ITEM_TYPE_RAW,
400 RTE_FLOW_ITEM_TYPE_RAW,
401 RTE_FLOW_ITEM_TYPE_END,
404 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1[] = {
405 RTE_FLOW_ITEM_TYPE_ETH,
406 RTE_FLOW_ITEM_TYPE_IPV6,
407 RTE_FLOW_ITEM_TYPE_RAW,
408 RTE_FLOW_ITEM_TYPE_END,
411 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2[] = {
412 RTE_FLOW_ITEM_TYPE_ETH,
413 RTE_FLOW_ITEM_TYPE_IPV6,
414 RTE_FLOW_ITEM_TYPE_RAW,
415 RTE_FLOW_ITEM_TYPE_RAW,
416 RTE_FLOW_ITEM_TYPE_END,
419 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3[] = {
420 RTE_FLOW_ITEM_TYPE_ETH,
421 RTE_FLOW_ITEM_TYPE_IPV6,
422 RTE_FLOW_ITEM_TYPE_RAW,
423 RTE_FLOW_ITEM_TYPE_RAW,
424 RTE_FLOW_ITEM_TYPE_RAW,
425 RTE_FLOW_ITEM_TYPE_END,
428 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1[] = {
429 RTE_FLOW_ITEM_TYPE_ETH,
430 RTE_FLOW_ITEM_TYPE_IPV6,
431 RTE_FLOW_ITEM_TYPE_UDP,
432 RTE_FLOW_ITEM_TYPE_RAW,
433 RTE_FLOW_ITEM_TYPE_END,
436 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2[] = {
437 RTE_FLOW_ITEM_TYPE_ETH,
438 RTE_FLOW_ITEM_TYPE_IPV6,
439 RTE_FLOW_ITEM_TYPE_UDP,
440 RTE_FLOW_ITEM_TYPE_RAW,
441 RTE_FLOW_ITEM_TYPE_RAW,
442 RTE_FLOW_ITEM_TYPE_END,
445 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3[] = {
446 RTE_FLOW_ITEM_TYPE_ETH,
447 RTE_FLOW_ITEM_TYPE_IPV6,
448 RTE_FLOW_ITEM_TYPE_UDP,
449 RTE_FLOW_ITEM_TYPE_RAW,
450 RTE_FLOW_ITEM_TYPE_RAW,
451 RTE_FLOW_ITEM_TYPE_RAW,
452 RTE_FLOW_ITEM_TYPE_END,
455 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1[] = {
456 RTE_FLOW_ITEM_TYPE_ETH,
457 RTE_FLOW_ITEM_TYPE_IPV6,
458 RTE_FLOW_ITEM_TYPE_TCP,
459 RTE_FLOW_ITEM_TYPE_RAW,
460 RTE_FLOW_ITEM_TYPE_END,
463 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2[] = {
464 RTE_FLOW_ITEM_TYPE_ETH,
465 RTE_FLOW_ITEM_TYPE_IPV6,
466 RTE_FLOW_ITEM_TYPE_TCP,
467 RTE_FLOW_ITEM_TYPE_RAW,
468 RTE_FLOW_ITEM_TYPE_RAW,
469 RTE_FLOW_ITEM_TYPE_END,
472 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3[] = {
473 RTE_FLOW_ITEM_TYPE_ETH,
474 RTE_FLOW_ITEM_TYPE_IPV6,
475 RTE_FLOW_ITEM_TYPE_TCP,
476 RTE_FLOW_ITEM_TYPE_RAW,
477 RTE_FLOW_ITEM_TYPE_RAW,
478 RTE_FLOW_ITEM_TYPE_RAW,
479 RTE_FLOW_ITEM_TYPE_END,
482 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1[] = {
483 RTE_FLOW_ITEM_TYPE_ETH,
484 RTE_FLOW_ITEM_TYPE_IPV6,
485 RTE_FLOW_ITEM_TYPE_SCTP,
486 RTE_FLOW_ITEM_TYPE_RAW,
487 RTE_FLOW_ITEM_TYPE_END,
490 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2[] = {
491 RTE_FLOW_ITEM_TYPE_ETH,
492 RTE_FLOW_ITEM_TYPE_IPV6,
493 RTE_FLOW_ITEM_TYPE_SCTP,
494 RTE_FLOW_ITEM_TYPE_RAW,
495 RTE_FLOW_ITEM_TYPE_RAW,
496 RTE_FLOW_ITEM_TYPE_END,
499 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3[] = {
500 RTE_FLOW_ITEM_TYPE_ETH,
501 RTE_FLOW_ITEM_TYPE_IPV6,
502 RTE_FLOW_ITEM_TYPE_SCTP,
503 RTE_FLOW_ITEM_TYPE_RAW,
504 RTE_FLOW_ITEM_TYPE_RAW,
505 RTE_FLOW_ITEM_TYPE_RAW,
506 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns for VLAN-tagged traffic. */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
515 static enum rte_flow_item_type pattern_fdir_vlan_ipv4[] = {
516 RTE_FLOW_ITEM_TYPE_ETH,
517 RTE_FLOW_ITEM_TYPE_VLAN,
518 RTE_FLOW_ITEM_TYPE_IPV4,
519 RTE_FLOW_ITEM_TYPE_END,
522 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp[] = {
523 RTE_FLOW_ITEM_TYPE_ETH,
524 RTE_FLOW_ITEM_TYPE_VLAN,
525 RTE_FLOW_ITEM_TYPE_IPV4,
526 RTE_FLOW_ITEM_TYPE_UDP,
527 RTE_FLOW_ITEM_TYPE_END,
530 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp[] = {
531 RTE_FLOW_ITEM_TYPE_ETH,
532 RTE_FLOW_ITEM_TYPE_VLAN,
533 RTE_FLOW_ITEM_TYPE_IPV4,
534 RTE_FLOW_ITEM_TYPE_TCP,
535 RTE_FLOW_ITEM_TYPE_END,
538 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp[] = {
539 RTE_FLOW_ITEM_TYPE_ETH,
540 RTE_FLOW_ITEM_TYPE_VLAN,
541 RTE_FLOW_ITEM_TYPE_IPV4,
542 RTE_FLOW_ITEM_TYPE_SCTP,
543 RTE_FLOW_ITEM_TYPE_END,
546 static enum rte_flow_item_type pattern_fdir_vlan_ipv6[] = {
547 RTE_FLOW_ITEM_TYPE_ETH,
548 RTE_FLOW_ITEM_TYPE_VLAN,
549 RTE_FLOW_ITEM_TYPE_IPV6,
550 RTE_FLOW_ITEM_TYPE_END,
553 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp[] = {
554 RTE_FLOW_ITEM_TYPE_ETH,
555 RTE_FLOW_ITEM_TYPE_VLAN,
556 RTE_FLOW_ITEM_TYPE_IPV6,
557 RTE_FLOW_ITEM_TYPE_UDP,
558 RTE_FLOW_ITEM_TYPE_END,
561 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp[] = {
562 RTE_FLOW_ITEM_TYPE_ETH,
563 RTE_FLOW_ITEM_TYPE_VLAN,
564 RTE_FLOW_ITEM_TYPE_IPV6,
565 RTE_FLOW_ITEM_TYPE_TCP,
566 RTE_FLOW_ITEM_TYPE_END,
569 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp[] = {
570 RTE_FLOW_ITEM_TYPE_ETH,
571 RTE_FLOW_ITEM_TYPE_VLAN,
572 RTE_FLOW_ITEM_TYPE_IPV6,
573 RTE_FLOW_ITEM_TYPE_SCTP,
574 RTE_FLOW_ITEM_TYPE_END,
/* VLAN-tagged FDIR patterns combined with trailing RAW (flexible
 * payload) items; suffix gives the RAW item count.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_END,
584 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2[] = {
585 RTE_FLOW_ITEM_TYPE_ETH,
586 RTE_FLOW_ITEM_TYPE_VLAN,
587 RTE_FLOW_ITEM_TYPE_RAW,
588 RTE_FLOW_ITEM_TYPE_RAW,
589 RTE_FLOW_ITEM_TYPE_END,
592 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3[] = {
593 RTE_FLOW_ITEM_TYPE_ETH,
594 RTE_FLOW_ITEM_TYPE_VLAN,
595 RTE_FLOW_ITEM_TYPE_RAW,
596 RTE_FLOW_ITEM_TYPE_RAW,
597 RTE_FLOW_ITEM_TYPE_RAW,
598 RTE_FLOW_ITEM_TYPE_END,
601 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1[] = {
602 RTE_FLOW_ITEM_TYPE_ETH,
603 RTE_FLOW_ITEM_TYPE_VLAN,
604 RTE_FLOW_ITEM_TYPE_IPV4,
605 RTE_FLOW_ITEM_TYPE_RAW,
606 RTE_FLOW_ITEM_TYPE_END,
609 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2[] = {
610 RTE_FLOW_ITEM_TYPE_ETH,
611 RTE_FLOW_ITEM_TYPE_VLAN,
612 RTE_FLOW_ITEM_TYPE_IPV4,
613 RTE_FLOW_ITEM_TYPE_RAW,
614 RTE_FLOW_ITEM_TYPE_RAW,
615 RTE_FLOW_ITEM_TYPE_END,
618 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3[] = {
619 RTE_FLOW_ITEM_TYPE_ETH,
620 RTE_FLOW_ITEM_TYPE_VLAN,
621 RTE_FLOW_ITEM_TYPE_IPV4,
622 RTE_FLOW_ITEM_TYPE_RAW,
623 RTE_FLOW_ITEM_TYPE_RAW,
624 RTE_FLOW_ITEM_TYPE_RAW,
625 RTE_FLOW_ITEM_TYPE_END,
628 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1[] = {
629 RTE_FLOW_ITEM_TYPE_ETH,
630 RTE_FLOW_ITEM_TYPE_VLAN,
631 RTE_FLOW_ITEM_TYPE_IPV4,
632 RTE_FLOW_ITEM_TYPE_UDP,
633 RTE_FLOW_ITEM_TYPE_RAW,
634 RTE_FLOW_ITEM_TYPE_END,
637 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2[] = {
638 RTE_FLOW_ITEM_TYPE_ETH,
639 RTE_FLOW_ITEM_TYPE_VLAN,
640 RTE_FLOW_ITEM_TYPE_IPV4,
641 RTE_FLOW_ITEM_TYPE_UDP,
642 RTE_FLOW_ITEM_TYPE_RAW,
643 RTE_FLOW_ITEM_TYPE_RAW,
644 RTE_FLOW_ITEM_TYPE_END,
647 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3[] = {
648 RTE_FLOW_ITEM_TYPE_ETH,
649 RTE_FLOW_ITEM_TYPE_VLAN,
650 RTE_FLOW_ITEM_TYPE_IPV4,
651 RTE_FLOW_ITEM_TYPE_UDP,
652 RTE_FLOW_ITEM_TYPE_RAW,
653 RTE_FLOW_ITEM_TYPE_RAW,
654 RTE_FLOW_ITEM_TYPE_RAW,
655 RTE_FLOW_ITEM_TYPE_END,
658 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1[] = {
659 RTE_FLOW_ITEM_TYPE_ETH,
660 RTE_FLOW_ITEM_TYPE_VLAN,
661 RTE_FLOW_ITEM_TYPE_IPV4,
662 RTE_FLOW_ITEM_TYPE_TCP,
663 RTE_FLOW_ITEM_TYPE_RAW,
664 RTE_FLOW_ITEM_TYPE_END,
667 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2[] = {
668 RTE_FLOW_ITEM_TYPE_ETH,
669 RTE_FLOW_ITEM_TYPE_VLAN,
670 RTE_FLOW_ITEM_TYPE_IPV4,
671 RTE_FLOW_ITEM_TYPE_TCP,
672 RTE_FLOW_ITEM_TYPE_RAW,
673 RTE_FLOW_ITEM_TYPE_RAW,
674 RTE_FLOW_ITEM_TYPE_END,
677 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3[] = {
678 RTE_FLOW_ITEM_TYPE_ETH,
679 RTE_FLOW_ITEM_TYPE_VLAN,
680 RTE_FLOW_ITEM_TYPE_IPV4,
681 RTE_FLOW_ITEM_TYPE_TCP,
682 RTE_FLOW_ITEM_TYPE_RAW,
683 RTE_FLOW_ITEM_TYPE_RAW,
684 RTE_FLOW_ITEM_TYPE_RAW,
685 RTE_FLOW_ITEM_TYPE_END,
688 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1[] = {
689 RTE_FLOW_ITEM_TYPE_ETH,
690 RTE_FLOW_ITEM_TYPE_VLAN,
691 RTE_FLOW_ITEM_TYPE_IPV4,
692 RTE_FLOW_ITEM_TYPE_SCTP,
693 RTE_FLOW_ITEM_TYPE_RAW,
694 RTE_FLOW_ITEM_TYPE_END,
697 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2[] = {
698 RTE_FLOW_ITEM_TYPE_ETH,
699 RTE_FLOW_ITEM_TYPE_VLAN,
700 RTE_FLOW_ITEM_TYPE_IPV4,
701 RTE_FLOW_ITEM_TYPE_SCTP,
702 RTE_FLOW_ITEM_TYPE_RAW,
703 RTE_FLOW_ITEM_TYPE_RAW,
704 RTE_FLOW_ITEM_TYPE_END,
707 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3[] = {
708 RTE_FLOW_ITEM_TYPE_ETH,
709 RTE_FLOW_ITEM_TYPE_VLAN,
710 RTE_FLOW_ITEM_TYPE_IPV4,
711 RTE_FLOW_ITEM_TYPE_SCTP,
712 RTE_FLOW_ITEM_TYPE_RAW,
713 RTE_FLOW_ITEM_TYPE_RAW,
714 RTE_FLOW_ITEM_TYPE_RAW,
715 RTE_FLOW_ITEM_TYPE_END,
718 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1[] = {
719 RTE_FLOW_ITEM_TYPE_ETH,
720 RTE_FLOW_ITEM_TYPE_VLAN,
721 RTE_FLOW_ITEM_TYPE_IPV6,
722 RTE_FLOW_ITEM_TYPE_RAW,
723 RTE_FLOW_ITEM_TYPE_END,
726 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2[] = {
727 RTE_FLOW_ITEM_TYPE_ETH,
728 RTE_FLOW_ITEM_TYPE_VLAN,
729 RTE_FLOW_ITEM_TYPE_IPV6,
730 RTE_FLOW_ITEM_TYPE_RAW,
731 RTE_FLOW_ITEM_TYPE_RAW,
732 RTE_FLOW_ITEM_TYPE_END,
735 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3[] = {
736 RTE_FLOW_ITEM_TYPE_ETH,
737 RTE_FLOW_ITEM_TYPE_VLAN,
738 RTE_FLOW_ITEM_TYPE_IPV6,
739 RTE_FLOW_ITEM_TYPE_RAW,
740 RTE_FLOW_ITEM_TYPE_RAW,
741 RTE_FLOW_ITEM_TYPE_RAW,
742 RTE_FLOW_ITEM_TYPE_END,
745 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1[] = {
746 RTE_FLOW_ITEM_TYPE_ETH,
747 RTE_FLOW_ITEM_TYPE_VLAN,
748 RTE_FLOW_ITEM_TYPE_IPV6,
749 RTE_FLOW_ITEM_TYPE_UDP,
750 RTE_FLOW_ITEM_TYPE_RAW,
751 RTE_FLOW_ITEM_TYPE_END,
754 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2[] = {
755 RTE_FLOW_ITEM_TYPE_ETH,
756 RTE_FLOW_ITEM_TYPE_VLAN,
757 RTE_FLOW_ITEM_TYPE_IPV6,
758 RTE_FLOW_ITEM_TYPE_UDP,
759 RTE_FLOW_ITEM_TYPE_RAW,
760 RTE_FLOW_ITEM_TYPE_RAW,
761 RTE_FLOW_ITEM_TYPE_END,
764 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3[] = {
765 RTE_FLOW_ITEM_TYPE_ETH,
766 RTE_FLOW_ITEM_TYPE_VLAN,
767 RTE_FLOW_ITEM_TYPE_IPV6,
768 RTE_FLOW_ITEM_TYPE_UDP,
769 RTE_FLOW_ITEM_TYPE_RAW,
770 RTE_FLOW_ITEM_TYPE_RAW,
771 RTE_FLOW_ITEM_TYPE_RAW,
772 RTE_FLOW_ITEM_TYPE_END,
775 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1[] = {
776 RTE_FLOW_ITEM_TYPE_ETH,
777 RTE_FLOW_ITEM_TYPE_VLAN,
778 RTE_FLOW_ITEM_TYPE_IPV6,
779 RTE_FLOW_ITEM_TYPE_TCP,
780 RTE_FLOW_ITEM_TYPE_RAW,
781 RTE_FLOW_ITEM_TYPE_END,
784 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2[] = {
785 RTE_FLOW_ITEM_TYPE_ETH,
786 RTE_FLOW_ITEM_TYPE_VLAN,
787 RTE_FLOW_ITEM_TYPE_IPV6,
788 RTE_FLOW_ITEM_TYPE_TCP,
789 RTE_FLOW_ITEM_TYPE_RAW,
790 RTE_FLOW_ITEM_TYPE_RAW,
791 RTE_FLOW_ITEM_TYPE_END,
794 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3[] = {
795 RTE_FLOW_ITEM_TYPE_ETH,
796 RTE_FLOW_ITEM_TYPE_VLAN,
797 RTE_FLOW_ITEM_TYPE_IPV6,
798 RTE_FLOW_ITEM_TYPE_TCP,
799 RTE_FLOW_ITEM_TYPE_RAW,
800 RTE_FLOW_ITEM_TYPE_RAW,
801 RTE_FLOW_ITEM_TYPE_RAW,
802 RTE_FLOW_ITEM_TYPE_END,
805 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1[] = {
806 RTE_FLOW_ITEM_TYPE_ETH,
807 RTE_FLOW_ITEM_TYPE_VLAN,
808 RTE_FLOW_ITEM_TYPE_IPV6,
809 RTE_FLOW_ITEM_TYPE_SCTP,
810 RTE_FLOW_ITEM_TYPE_RAW,
811 RTE_FLOW_ITEM_TYPE_END,
814 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2[] = {
815 RTE_FLOW_ITEM_TYPE_ETH,
816 RTE_FLOW_ITEM_TYPE_VLAN,
817 RTE_FLOW_ITEM_TYPE_IPV6,
818 RTE_FLOW_ITEM_TYPE_SCTP,
819 RTE_FLOW_ITEM_TYPE_RAW,
820 RTE_FLOW_ITEM_TYPE_RAW,
821 RTE_FLOW_ITEM_TYPE_END,
824 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3[] = {
825 RTE_FLOW_ITEM_TYPE_ETH,
826 RTE_FLOW_ITEM_TYPE_VLAN,
827 RTE_FLOW_ITEM_TYPE_IPV6,
828 RTE_FLOW_ITEM_TYPE_SCTP,
829 RTE_FLOW_ITEM_TYPE_RAW,
830 RTE_FLOW_ITEM_TYPE_RAW,
831 RTE_FLOW_ITEM_TYPE_RAW,
832 RTE_FLOW_ITEM_TYPE_END,
/* FDIR patterns terminated by a VF item: matched traffic is steered
 * to a virtual function rather than the PF's own queues.
 */
static enum rte_flow_item_type pattern_fdir_ipv4_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
842 static enum rte_flow_item_type pattern_fdir_ipv4_udp_vf[] = {
843 RTE_FLOW_ITEM_TYPE_ETH,
844 RTE_FLOW_ITEM_TYPE_IPV4,
845 RTE_FLOW_ITEM_TYPE_UDP,
846 RTE_FLOW_ITEM_TYPE_VF,
847 RTE_FLOW_ITEM_TYPE_END,
850 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_vf[] = {
851 RTE_FLOW_ITEM_TYPE_ETH,
852 RTE_FLOW_ITEM_TYPE_IPV4,
853 RTE_FLOW_ITEM_TYPE_TCP,
854 RTE_FLOW_ITEM_TYPE_VF,
855 RTE_FLOW_ITEM_TYPE_END,
858 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_vf[] = {
859 RTE_FLOW_ITEM_TYPE_ETH,
860 RTE_FLOW_ITEM_TYPE_IPV4,
861 RTE_FLOW_ITEM_TYPE_SCTP,
862 RTE_FLOW_ITEM_TYPE_VF,
863 RTE_FLOW_ITEM_TYPE_END,
866 static enum rte_flow_item_type pattern_fdir_ipv6_vf[] = {
867 RTE_FLOW_ITEM_TYPE_ETH,
868 RTE_FLOW_ITEM_TYPE_IPV6,
869 RTE_FLOW_ITEM_TYPE_VF,
870 RTE_FLOW_ITEM_TYPE_END,
873 static enum rte_flow_item_type pattern_fdir_ipv6_udp_vf[] = {
874 RTE_FLOW_ITEM_TYPE_ETH,
875 RTE_FLOW_ITEM_TYPE_IPV6,
876 RTE_FLOW_ITEM_TYPE_UDP,
877 RTE_FLOW_ITEM_TYPE_VF,
878 RTE_FLOW_ITEM_TYPE_END,
881 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_vf[] = {
882 RTE_FLOW_ITEM_TYPE_ETH,
883 RTE_FLOW_ITEM_TYPE_IPV6,
884 RTE_FLOW_ITEM_TYPE_TCP,
885 RTE_FLOW_ITEM_TYPE_VF,
886 RTE_FLOW_ITEM_TYPE_END,
889 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_vf[] = {
890 RTE_FLOW_ITEM_TYPE_ETH,
891 RTE_FLOW_ITEM_TYPE_IPV6,
892 RTE_FLOW_ITEM_TYPE_SCTP,
893 RTE_FLOW_ITEM_TYPE_VF,
894 RTE_FLOW_ITEM_TYPE_END,
/* RAW (flexible payload) FDIR patterns with VF redirection; suffix
 * before _vf gives the RAW item count.
 */
static enum rte_flow_item_type pattern_fdir_ethertype_raw_1_vf[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_RAW,
	RTE_FLOW_ITEM_TYPE_VF,
	RTE_FLOW_ITEM_TYPE_END,
904 static enum rte_flow_item_type pattern_fdir_ethertype_raw_2_vf[] = {
905 RTE_FLOW_ITEM_TYPE_ETH,
906 RTE_FLOW_ITEM_TYPE_RAW,
907 RTE_FLOW_ITEM_TYPE_RAW,
908 RTE_FLOW_ITEM_TYPE_VF,
909 RTE_FLOW_ITEM_TYPE_END,
912 static enum rte_flow_item_type pattern_fdir_ethertype_raw_3_vf[] = {
913 RTE_FLOW_ITEM_TYPE_ETH,
914 RTE_FLOW_ITEM_TYPE_RAW,
915 RTE_FLOW_ITEM_TYPE_RAW,
916 RTE_FLOW_ITEM_TYPE_RAW,
917 RTE_FLOW_ITEM_TYPE_VF,
918 RTE_FLOW_ITEM_TYPE_END,
921 static enum rte_flow_item_type pattern_fdir_ipv4_raw_1_vf[] = {
922 RTE_FLOW_ITEM_TYPE_ETH,
923 RTE_FLOW_ITEM_TYPE_IPV4,
924 RTE_FLOW_ITEM_TYPE_RAW,
925 RTE_FLOW_ITEM_TYPE_VF,
926 RTE_FLOW_ITEM_TYPE_END,
929 static enum rte_flow_item_type pattern_fdir_ipv4_raw_2_vf[] = {
930 RTE_FLOW_ITEM_TYPE_ETH,
931 RTE_FLOW_ITEM_TYPE_IPV4,
932 RTE_FLOW_ITEM_TYPE_RAW,
933 RTE_FLOW_ITEM_TYPE_RAW,
934 RTE_FLOW_ITEM_TYPE_VF,
935 RTE_FLOW_ITEM_TYPE_END,
938 static enum rte_flow_item_type pattern_fdir_ipv4_raw_3_vf[] = {
939 RTE_FLOW_ITEM_TYPE_ETH,
940 RTE_FLOW_ITEM_TYPE_IPV4,
941 RTE_FLOW_ITEM_TYPE_RAW,
942 RTE_FLOW_ITEM_TYPE_RAW,
943 RTE_FLOW_ITEM_TYPE_RAW,
944 RTE_FLOW_ITEM_TYPE_VF,
945 RTE_FLOW_ITEM_TYPE_END,
948 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_1_vf[] = {
949 RTE_FLOW_ITEM_TYPE_ETH,
950 RTE_FLOW_ITEM_TYPE_IPV4,
951 RTE_FLOW_ITEM_TYPE_UDP,
952 RTE_FLOW_ITEM_TYPE_RAW,
953 RTE_FLOW_ITEM_TYPE_VF,
954 RTE_FLOW_ITEM_TYPE_END,
957 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_2_vf[] = {
958 RTE_FLOW_ITEM_TYPE_ETH,
959 RTE_FLOW_ITEM_TYPE_IPV4,
960 RTE_FLOW_ITEM_TYPE_UDP,
961 RTE_FLOW_ITEM_TYPE_RAW,
962 RTE_FLOW_ITEM_TYPE_RAW,
963 RTE_FLOW_ITEM_TYPE_VF,
964 RTE_FLOW_ITEM_TYPE_END,
967 static enum rte_flow_item_type pattern_fdir_ipv4_udp_raw_3_vf[] = {
968 RTE_FLOW_ITEM_TYPE_ETH,
969 RTE_FLOW_ITEM_TYPE_IPV4,
970 RTE_FLOW_ITEM_TYPE_UDP,
971 RTE_FLOW_ITEM_TYPE_RAW,
972 RTE_FLOW_ITEM_TYPE_RAW,
973 RTE_FLOW_ITEM_TYPE_RAW,
974 RTE_FLOW_ITEM_TYPE_VF,
975 RTE_FLOW_ITEM_TYPE_END,
978 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_1_vf[] = {
979 RTE_FLOW_ITEM_TYPE_ETH,
980 RTE_FLOW_ITEM_TYPE_IPV4,
981 RTE_FLOW_ITEM_TYPE_TCP,
982 RTE_FLOW_ITEM_TYPE_RAW,
983 RTE_FLOW_ITEM_TYPE_VF,
984 RTE_FLOW_ITEM_TYPE_END,
987 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_2_vf[] = {
988 RTE_FLOW_ITEM_TYPE_ETH,
989 RTE_FLOW_ITEM_TYPE_IPV4,
990 RTE_FLOW_ITEM_TYPE_TCP,
991 RTE_FLOW_ITEM_TYPE_RAW,
992 RTE_FLOW_ITEM_TYPE_RAW,
993 RTE_FLOW_ITEM_TYPE_VF,
994 RTE_FLOW_ITEM_TYPE_END,
997 static enum rte_flow_item_type pattern_fdir_ipv4_tcp_raw_3_vf[] = {
998 RTE_FLOW_ITEM_TYPE_ETH,
999 RTE_FLOW_ITEM_TYPE_IPV4,
1000 RTE_FLOW_ITEM_TYPE_TCP,
1001 RTE_FLOW_ITEM_TYPE_RAW,
1002 RTE_FLOW_ITEM_TYPE_RAW,
1003 RTE_FLOW_ITEM_TYPE_RAW,
1004 RTE_FLOW_ITEM_TYPE_VF,
1005 RTE_FLOW_ITEM_TYPE_END,
1008 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_1_vf[] = {
1009 RTE_FLOW_ITEM_TYPE_ETH,
1010 RTE_FLOW_ITEM_TYPE_IPV4,
1011 RTE_FLOW_ITEM_TYPE_SCTP,
1012 RTE_FLOW_ITEM_TYPE_RAW,
1013 RTE_FLOW_ITEM_TYPE_VF,
1014 RTE_FLOW_ITEM_TYPE_END,
1017 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_2_vf[] = {
1018 RTE_FLOW_ITEM_TYPE_ETH,
1019 RTE_FLOW_ITEM_TYPE_IPV4,
1020 RTE_FLOW_ITEM_TYPE_SCTP,
1021 RTE_FLOW_ITEM_TYPE_RAW,
1022 RTE_FLOW_ITEM_TYPE_RAW,
1023 RTE_FLOW_ITEM_TYPE_VF,
1024 RTE_FLOW_ITEM_TYPE_END,
1027 static enum rte_flow_item_type pattern_fdir_ipv4_sctp_raw_3_vf[] = {
1028 RTE_FLOW_ITEM_TYPE_ETH,
1029 RTE_FLOW_ITEM_TYPE_IPV4,
1030 RTE_FLOW_ITEM_TYPE_SCTP,
1031 RTE_FLOW_ITEM_TYPE_RAW,
1032 RTE_FLOW_ITEM_TYPE_RAW,
1033 RTE_FLOW_ITEM_TYPE_RAW,
1034 RTE_FLOW_ITEM_TYPE_VF,
1035 RTE_FLOW_ITEM_TYPE_END,
1038 static enum rte_flow_item_type pattern_fdir_ipv6_raw_1_vf[] = {
1039 RTE_FLOW_ITEM_TYPE_ETH,
1040 RTE_FLOW_ITEM_TYPE_IPV6,
1041 RTE_FLOW_ITEM_TYPE_RAW,
1042 RTE_FLOW_ITEM_TYPE_VF,
1043 RTE_FLOW_ITEM_TYPE_END,
1046 static enum rte_flow_item_type pattern_fdir_ipv6_raw_2_vf[] = {
1047 RTE_FLOW_ITEM_TYPE_ETH,
1048 RTE_FLOW_ITEM_TYPE_IPV6,
1049 RTE_FLOW_ITEM_TYPE_RAW,
1050 RTE_FLOW_ITEM_TYPE_RAW,
1051 RTE_FLOW_ITEM_TYPE_VF,
1052 RTE_FLOW_ITEM_TYPE_END,
1055 static enum rte_flow_item_type pattern_fdir_ipv6_raw_3_vf[] = {
1056 RTE_FLOW_ITEM_TYPE_ETH,
1057 RTE_FLOW_ITEM_TYPE_IPV6,
1058 RTE_FLOW_ITEM_TYPE_RAW,
1059 RTE_FLOW_ITEM_TYPE_RAW,
1060 RTE_FLOW_ITEM_TYPE_RAW,
1061 RTE_FLOW_ITEM_TYPE_VF,
1062 RTE_FLOW_ITEM_TYPE_END,
1065 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_1_vf[] = {
1066 RTE_FLOW_ITEM_TYPE_ETH,
1067 RTE_FLOW_ITEM_TYPE_IPV6,
1068 RTE_FLOW_ITEM_TYPE_UDP,
1069 RTE_FLOW_ITEM_TYPE_RAW,
1070 RTE_FLOW_ITEM_TYPE_VF,
1071 RTE_FLOW_ITEM_TYPE_END,
1074 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_2_vf[] = {
1075 RTE_FLOW_ITEM_TYPE_ETH,
1076 RTE_FLOW_ITEM_TYPE_IPV6,
1077 RTE_FLOW_ITEM_TYPE_UDP,
1078 RTE_FLOW_ITEM_TYPE_RAW,
1079 RTE_FLOW_ITEM_TYPE_RAW,
1080 RTE_FLOW_ITEM_TYPE_VF,
1081 RTE_FLOW_ITEM_TYPE_END,
1084 static enum rte_flow_item_type pattern_fdir_ipv6_udp_raw_3_vf[] = {
1085 RTE_FLOW_ITEM_TYPE_ETH,
1086 RTE_FLOW_ITEM_TYPE_IPV6,
1087 RTE_FLOW_ITEM_TYPE_UDP,
1088 RTE_FLOW_ITEM_TYPE_RAW,
1089 RTE_FLOW_ITEM_TYPE_RAW,
1090 RTE_FLOW_ITEM_TYPE_RAW,
1091 RTE_FLOW_ITEM_TYPE_VF,
1092 RTE_FLOW_ITEM_TYPE_END,
1095 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_1_vf[] = {
1096 RTE_FLOW_ITEM_TYPE_ETH,
1097 RTE_FLOW_ITEM_TYPE_IPV6,
1098 RTE_FLOW_ITEM_TYPE_TCP,
1099 RTE_FLOW_ITEM_TYPE_RAW,
1100 RTE_FLOW_ITEM_TYPE_VF,
1101 RTE_FLOW_ITEM_TYPE_END,
1104 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_2_vf[] = {
1105 RTE_FLOW_ITEM_TYPE_ETH,
1106 RTE_FLOW_ITEM_TYPE_IPV6,
1107 RTE_FLOW_ITEM_TYPE_TCP,
1108 RTE_FLOW_ITEM_TYPE_RAW,
1109 RTE_FLOW_ITEM_TYPE_RAW,
1110 RTE_FLOW_ITEM_TYPE_VF,
1111 RTE_FLOW_ITEM_TYPE_END,
1114 static enum rte_flow_item_type pattern_fdir_ipv6_tcp_raw_3_vf[] = {
1115 RTE_FLOW_ITEM_TYPE_ETH,
1116 RTE_FLOW_ITEM_TYPE_IPV6,
1117 RTE_FLOW_ITEM_TYPE_TCP,
1118 RTE_FLOW_ITEM_TYPE_RAW,
1119 RTE_FLOW_ITEM_TYPE_RAW,
1120 RTE_FLOW_ITEM_TYPE_RAW,
1121 RTE_FLOW_ITEM_TYPE_VF,
1122 RTE_FLOW_ITEM_TYPE_END,
1125 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_1_vf[] = {
1126 RTE_FLOW_ITEM_TYPE_ETH,
1127 RTE_FLOW_ITEM_TYPE_IPV6,
1128 RTE_FLOW_ITEM_TYPE_SCTP,
1129 RTE_FLOW_ITEM_TYPE_RAW,
1130 RTE_FLOW_ITEM_TYPE_VF,
1131 RTE_FLOW_ITEM_TYPE_END,
1134 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_2_vf[] = {
1135 RTE_FLOW_ITEM_TYPE_ETH,
1136 RTE_FLOW_ITEM_TYPE_IPV6,
1137 RTE_FLOW_ITEM_TYPE_SCTP,
1138 RTE_FLOW_ITEM_TYPE_RAW,
1139 RTE_FLOW_ITEM_TYPE_RAW,
1140 RTE_FLOW_ITEM_TYPE_VF,
1141 RTE_FLOW_ITEM_TYPE_END,
1144 static enum rte_flow_item_type pattern_fdir_ipv6_sctp_raw_3_vf[] = {
1145 RTE_FLOW_ITEM_TYPE_ETH,
1146 RTE_FLOW_ITEM_TYPE_IPV6,
1147 RTE_FLOW_ITEM_TYPE_SCTP,
1148 RTE_FLOW_ITEM_TYPE_RAW,
1149 RTE_FLOW_ITEM_TYPE_RAW,
1150 RTE_FLOW_ITEM_TYPE_RAW,
1151 RTE_FLOW_ITEM_TYPE_VF,
1152 RTE_FLOW_ITEM_TYPE_END,
1155 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_vf[] = {
1156 RTE_FLOW_ITEM_TYPE_ETH,
1157 RTE_FLOW_ITEM_TYPE_VLAN,
1158 RTE_FLOW_ITEM_TYPE_VF,
1159 RTE_FLOW_ITEM_TYPE_END,
1162 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_vf[] = {
1163 RTE_FLOW_ITEM_TYPE_ETH,
1164 RTE_FLOW_ITEM_TYPE_VLAN,
1165 RTE_FLOW_ITEM_TYPE_IPV4,
1166 RTE_FLOW_ITEM_TYPE_VF,
1167 RTE_FLOW_ITEM_TYPE_END,
1170 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_vf[] = {
1171 RTE_FLOW_ITEM_TYPE_ETH,
1172 RTE_FLOW_ITEM_TYPE_VLAN,
1173 RTE_FLOW_ITEM_TYPE_IPV4,
1174 RTE_FLOW_ITEM_TYPE_UDP,
1175 RTE_FLOW_ITEM_TYPE_VF,
1176 RTE_FLOW_ITEM_TYPE_END,
1179 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_vf[] = {
1180 RTE_FLOW_ITEM_TYPE_ETH,
1181 RTE_FLOW_ITEM_TYPE_VLAN,
1182 RTE_FLOW_ITEM_TYPE_IPV4,
1183 RTE_FLOW_ITEM_TYPE_TCP,
1184 RTE_FLOW_ITEM_TYPE_VF,
1185 RTE_FLOW_ITEM_TYPE_END,
1188 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_vf[] = {
1189 RTE_FLOW_ITEM_TYPE_ETH,
1190 RTE_FLOW_ITEM_TYPE_VLAN,
1191 RTE_FLOW_ITEM_TYPE_IPV4,
1192 RTE_FLOW_ITEM_TYPE_SCTP,
1193 RTE_FLOW_ITEM_TYPE_VF,
1194 RTE_FLOW_ITEM_TYPE_END,
1197 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_vf[] = {
1198 RTE_FLOW_ITEM_TYPE_ETH,
1199 RTE_FLOW_ITEM_TYPE_VLAN,
1200 RTE_FLOW_ITEM_TYPE_IPV6,
1201 RTE_FLOW_ITEM_TYPE_VF,
1202 RTE_FLOW_ITEM_TYPE_END,
1205 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_vf[] = {
1206 RTE_FLOW_ITEM_TYPE_ETH,
1207 RTE_FLOW_ITEM_TYPE_VLAN,
1208 RTE_FLOW_ITEM_TYPE_IPV6,
1209 RTE_FLOW_ITEM_TYPE_UDP,
1210 RTE_FLOW_ITEM_TYPE_VF,
1211 RTE_FLOW_ITEM_TYPE_END,
1214 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_vf[] = {
1215 RTE_FLOW_ITEM_TYPE_ETH,
1216 RTE_FLOW_ITEM_TYPE_VLAN,
1217 RTE_FLOW_ITEM_TYPE_IPV6,
1218 RTE_FLOW_ITEM_TYPE_TCP,
1219 RTE_FLOW_ITEM_TYPE_VF,
1220 RTE_FLOW_ITEM_TYPE_END,
1223 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_vf[] = {
1224 RTE_FLOW_ITEM_TYPE_ETH,
1225 RTE_FLOW_ITEM_TYPE_VLAN,
1226 RTE_FLOW_ITEM_TYPE_IPV6,
1227 RTE_FLOW_ITEM_TYPE_SCTP,
1228 RTE_FLOW_ITEM_TYPE_VF,
1229 RTE_FLOW_ITEM_TYPE_END,
1232 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_1_vf[] = {
1233 RTE_FLOW_ITEM_TYPE_ETH,
1234 RTE_FLOW_ITEM_TYPE_VLAN,
1235 RTE_FLOW_ITEM_TYPE_RAW,
1236 RTE_FLOW_ITEM_TYPE_VF,
1237 RTE_FLOW_ITEM_TYPE_END,
1240 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_2_vf[] = {
1241 RTE_FLOW_ITEM_TYPE_ETH,
1242 RTE_FLOW_ITEM_TYPE_VLAN,
1243 RTE_FLOW_ITEM_TYPE_RAW,
1244 RTE_FLOW_ITEM_TYPE_RAW,
1245 RTE_FLOW_ITEM_TYPE_VF,
1246 RTE_FLOW_ITEM_TYPE_END,
1249 static enum rte_flow_item_type pattern_fdir_ethertype_vlan_raw_3_vf[] = {
1250 RTE_FLOW_ITEM_TYPE_ETH,
1251 RTE_FLOW_ITEM_TYPE_VLAN,
1252 RTE_FLOW_ITEM_TYPE_RAW,
1253 RTE_FLOW_ITEM_TYPE_RAW,
1254 RTE_FLOW_ITEM_TYPE_RAW,
1255 RTE_FLOW_ITEM_TYPE_VF,
1256 RTE_FLOW_ITEM_TYPE_END,
1259 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_1_vf[] = {
1260 RTE_FLOW_ITEM_TYPE_ETH,
1261 RTE_FLOW_ITEM_TYPE_VLAN,
1262 RTE_FLOW_ITEM_TYPE_IPV4,
1263 RTE_FLOW_ITEM_TYPE_RAW,
1264 RTE_FLOW_ITEM_TYPE_VF,
1265 RTE_FLOW_ITEM_TYPE_END,
1268 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_2_vf[] = {
1269 RTE_FLOW_ITEM_TYPE_ETH,
1270 RTE_FLOW_ITEM_TYPE_VLAN,
1271 RTE_FLOW_ITEM_TYPE_IPV4,
1272 RTE_FLOW_ITEM_TYPE_RAW,
1273 RTE_FLOW_ITEM_TYPE_RAW,
1274 RTE_FLOW_ITEM_TYPE_VF,
1275 RTE_FLOW_ITEM_TYPE_END,
1278 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_raw_3_vf[] = {
1279 RTE_FLOW_ITEM_TYPE_ETH,
1280 RTE_FLOW_ITEM_TYPE_VLAN,
1281 RTE_FLOW_ITEM_TYPE_IPV4,
1282 RTE_FLOW_ITEM_TYPE_RAW,
1283 RTE_FLOW_ITEM_TYPE_RAW,
1284 RTE_FLOW_ITEM_TYPE_RAW,
1285 RTE_FLOW_ITEM_TYPE_VF,
1286 RTE_FLOW_ITEM_TYPE_END,
1289 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_1_vf[] = {
1290 RTE_FLOW_ITEM_TYPE_ETH,
1291 RTE_FLOW_ITEM_TYPE_VLAN,
1292 RTE_FLOW_ITEM_TYPE_IPV4,
1293 RTE_FLOW_ITEM_TYPE_UDP,
1294 RTE_FLOW_ITEM_TYPE_RAW,
1295 RTE_FLOW_ITEM_TYPE_VF,
1296 RTE_FLOW_ITEM_TYPE_END,
1299 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_2_vf[] = {
1300 RTE_FLOW_ITEM_TYPE_ETH,
1301 RTE_FLOW_ITEM_TYPE_VLAN,
1302 RTE_FLOW_ITEM_TYPE_IPV4,
1303 RTE_FLOW_ITEM_TYPE_UDP,
1304 RTE_FLOW_ITEM_TYPE_RAW,
1305 RTE_FLOW_ITEM_TYPE_RAW,
1306 RTE_FLOW_ITEM_TYPE_VF,
1307 RTE_FLOW_ITEM_TYPE_END,
1310 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_udp_raw_3_vf[] = {
1311 RTE_FLOW_ITEM_TYPE_ETH,
1312 RTE_FLOW_ITEM_TYPE_VLAN,
1313 RTE_FLOW_ITEM_TYPE_IPV4,
1314 RTE_FLOW_ITEM_TYPE_UDP,
1315 RTE_FLOW_ITEM_TYPE_RAW,
1316 RTE_FLOW_ITEM_TYPE_RAW,
1317 RTE_FLOW_ITEM_TYPE_RAW,
1318 RTE_FLOW_ITEM_TYPE_VF,
1319 RTE_FLOW_ITEM_TYPE_END,
1322 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_1_vf[] = {
1323 RTE_FLOW_ITEM_TYPE_ETH,
1324 RTE_FLOW_ITEM_TYPE_VLAN,
1325 RTE_FLOW_ITEM_TYPE_IPV4,
1326 RTE_FLOW_ITEM_TYPE_TCP,
1327 RTE_FLOW_ITEM_TYPE_RAW,
1328 RTE_FLOW_ITEM_TYPE_VF,
1329 RTE_FLOW_ITEM_TYPE_END,
1332 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_2_vf[] = {
1333 RTE_FLOW_ITEM_TYPE_ETH,
1334 RTE_FLOW_ITEM_TYPE_VLAN,
1335 RTE_FLOW_ITEM_TYPE_IPV4,
1336 RTE_FLOW_ITEM_TYPE_TCP,
1337 RTE_FLOW_ITEM_TYPE_RAW,
1338 RTE_FLOW_ITEM_TYPE_RAW,
1339 RTE_FLOW_ITEM_TYPE_VF,
1340 RTE_FLOW_ITEM_TYPE_END,
1343 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_tcp_raw_3_vf[] = {
1344 RTE_FLOW_ITEM_TYPE_ETH,
1345 RTE_FLOW_ITEM_TYPE_VLAN,
1346 RTE_FLOW_ITEM_TYPE_IPV4,
1347 RTE_FLOW_ITEM_TYPE_TCP,
1348 RTE_FLOW_ITEM_TYPE_RAW,
1349 RTE_FLOW_ITEM_TYPE_RAW,
1350 RTE_FLOW_ITEM_TYPE_RAW,
1351 RTE_FLOW_ITEM_TYPE_VF,
1352 RTE_FLOW_ITEM_TYPE_END,
1355 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_1_vf[] = {
1356 RTE_FLOW_ITEM_TYPE_ETH,
1357 RTE_FLOW_ITEM_TYPE_VLAN,
1358 RTE_FLOW_ITEM_TYPE_IPV4,
1359 RTE_FLOW_ITEM_TYPE_SCTP,
1360 RTE_FLOW_ITEM_TYPE_RAW,
1361 RTE_FLOW_ITEM_TYPE_VF,
1362 RTE_FLOW_ITEM_TYPE_END,
1365 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_2_vf[] = {
1366 RTE_FLOW_ITEM_TYPE_ETH,
1367 RTE_FLOW_ITEM_TYPE_VLAN,
1368 RTE_FLOW_ITEM_TYPE_IPV4,
1369 RTE_FLOW_ITEM_TYPE_SCTP,
1370 RTE_FLOW_ITEM_TYPE_RAW,
1371 RTE_FLOW_ITEM_TYPE_RAW,
1372 RTE_FLOW_ITEM_TYPE_VF,
1373 RTE_FLOW_ITEM_TYPE_END,
1376 static enum rte_flow_item_type pattern_fdir_vlan_ipv4_sctp_raw_3_vf[] = {
1377 RTE_FLOW_ITEM_TYPE_ETH,
1378 RTE_FLOW_ITEM_TYPE_VLAN,
1379 RTE_FLOW_ITEM_TYPE_IPV4,
1380 RTE_FLOW_ITEM_TYPE_SCTP,
1381 RTE_FLOW_ITEM_TYPE_RAW,
1382 RTE_FLOW_ITEM_TYPE_RAW,
1383 RTE_FLOW_ITEM_TYPE_RAW,
1384 RTE_FLOW_ITEM_TYPE_VF,
1385 RTE_FLOW_ITEM_TYPE_END,
1388 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_1_vf[] = {
1389 RTE_FLOW_ITEM_TYPE_ETH,
1390 RTE_FLOW_ITEM_TYPE_VLAN,
1391 RTE_FLOW_ITEM_TYPE_IPV6,
1392 RTE_FLOW_ITEM_TYPE_RAW,
1393 RTE_FLOW_ITEM_TYPE_VF,
1394 RTE_FLOW_ITEM_TYPE_END,
1397 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_2_vf[] = {
1398 RTE_FLOW_ITEM_TYPE_ETH,
1399 RTE_FLOW_ITEM_TYPE_VLAN,
1400 RTE_FLOW_ITEM_TYPE_IPV6,
1401 RTE_FLOW_ITEM_TYPE_RAW,
1402 RTE_FLOW_ITEM_TYPE_RAW,
1403 RTE_FLOW_ITEM_TYPE_VF,
1404 RTE_FLOW_ITEM_TYPE_END,
1407 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_raw_3_vf[] = {
1408 RTE_FLOW_ITEM_TYPE_ETH,
1409 RTE_FLOW_ITEM_TYPE_VLAN,
1410 RTE_FLOW_ITEM_TYPE_IPV6,
1411 RTE_FLOW_ITEM_TYPE_RAW,
1412 RTE_FLOW_ITEM_TYPE_RAW,
1413 RTE_FLOW_ITEM_TYPE_RAW,
1414 RTE_FLOW_ITEM_TYPE_VF,
1415 RTE_FLOW_ITEM_TYPE_END,
1418 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_1_vf[] = {
1419 RTE_FLOW_ITEM_TYPE_ETH,
1420 RTE_FLOW_ITEM_TYPE_VLAN,
1421 RTE_FLOW_ITEM_TYPE_IPV6,
1422 RTE_FLOW_ITEM_TYPE_UDP,
1423 RTE_FLOW_ITEM_TYPE_RAW,
1424 RTE_FLOW_ITEM_TYPE_VF,
1425 RTE_FLOW_ITEM_TYPE_END,
1428 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_2_vf[] = {
1429 RTE_FLOW_ITEM_TYPE_ETH,
1430 RTE_FLOW_ITEM_TYPE_VLAN,
1431 RTE_FLOW_ITEM_TYPE_IPV6,
1432 RTE_FLOW_ITEM_TYPE_UDP,
1433 RTE_FLOW_ITEM_TYPE_RAW,
1434 RTE_FLOW_ITEM_TYPE_RAW,
1435 RTE_FLOW_ITEM_TYPE_VF,
1436 RTE_FLOW_ITEM_TYPE_END,
1439 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_udp_raw_3_vf[] = {
1440 RTE_FLOW_ITEM_TYPE_ETH,
1441 RTE_FLOW_ITEM_TYPE_VLAN,
1442 RTE_FLOW_ITEM_TYPE_IPV6,
1443 RTE_FLOW_ITEM_TYPE_UDP,
1444 RTE_FLOW_ITEM_TYPE_RAW,
1445 RTE_FLOW_ITEM_TYPE_RAW,
1446 RTE_FLOW_ITEM_TYPE_RAW,
1447 RTE_FLOW_ITEM_TYPE_VF,
1448 RTE_FLOW_ITEM_TYPE_END,
1451 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_1_vf[] = {
1452 RTE_FLOW_ITEM_TYPE_ETH,
1453 RTE_FLOW_ITEM_TYPE_VLAN,
1454 RTE_FLOW_ITEM_TYPE_IPV6,
1455 RTE_FLOW_ITEM_TYPE_TCP,
1456 RTE_FLOW_ITEM_TYPE_RAW,
1457 RTE_FLOW_ITEM_TYPE_VF,
1458 RTE_FLOW_ITEM_TYPE_END,
1461 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_2_vf[] = {
1462 RTE_FLOW_ITEM_TYPE_ETH,
1463 RTE_FLOW_ITEM_TYPE_VLAN,
1464 RTE_FLOW_ITEM_TYPE_IPV6,
1465 RTE_FLOW_ITEM_TYPE_TCP,
1466 RTE_FLOW_ITEM_TYPE_RAW,
1467 RTE_FLOW_ITEM_TYPE_RAW,
1468 RTE_FLOW_ITEM_TYPE_VF,
1469 RTE_FLOW_ITEM_TYPE_END,
1472 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_tcp_raw_3_vf[] = {
1473 RTE_FLOW_ITEM_TYPE_ETH,
1474 RTE_FLOW_ITEM_TYPE_VLAN,
1475 RTE_FLOW_ITEM_TYPE_IPV6,
1476 RTE_FLOW_ITEM_TYPE_TCP,
1477 RTE_FLOW_ITEM_TYPE_RAW,
1478 RTE_FLOW_ITEM_TYPE_RAW,
1479 RTE_FLOW_ITEM_TYPE_RAW,
1480 RTE_FLOW_ITEM_TYPE_VF,
1481 RTE_FLOW_ITEM_TYPE_END,
1484 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_1_vf[] = {
1485 RTE_FLOW_ITEM_TYPE_ETH,
1486 RTE_FLOW_ITEM_TYPE_VLAN,
1487 RTE_FLOW_ITEM_TYPE_IPV6,
1488 RTE_FLOW_ITEM_TYPE_SCTP,
1489 RTE_FLOW_ITEM_TYPE_RAW,
1490 RTE_FLOW_ITEM_TYPE_VF,
1491 RTE_FLOW_ITEM_TYPE_END,
1494 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_2_vf[] = {
1495 RTE_FLOW_ITEM_TYPE_ETH,
1496 RTE_FLOW_ITEM_TYPE_VLAN,
1497 RTE_FLOW_ITEM_TYPE_IPV6,
1498 RTE_FLOW_ITEM_TYPE_SCTP,
1499 RTE_FLOW_ITEM_TYPE_RAW,
1500 RTE_FLOW_ITEM_TYPE_RAW,
1501 RTE_FLOW_ITEM_TYPE_VF,
1502 RTE_FLOW_ITEM_TYPE_END,
1505 static enum rte_flow_item_type pattern_fdir_vlan_ipv6_sctp_raw_3_vf[] = {
1506 RTE_FLOW_ITEM_TYPE_ETH,
1507 RTE_FLOW_ITEM_TYPE_VLAN,
1508 RTE_FLOW_ITEM_TYPE_IPV6,
1509 RTE_FLOW_ITEM_TYPE_SCTP,
1510 RTE_FLOW_ITEM_TYPE_RAW,
1511 RTE_FLOW_ITEM_TYPE_RAW,
1512 RTE_FLOW_ITEM_TYPE_RAW,
1513 RTE_FLOW_ITEM_TYPE_VF,
1514 RTE_FLOW_ITEM_TYPE_END,
1517 /* Pattern matched tunnel filter */
1518 static enum rte_flow_item_type pattern_vxlan_1[] = {
1519 RTE_FLOW_ITEM_TYPE_ETH,
1520 RTE_FLOW_ITEM_TYPE_IPV4,
1521 RTE_FLOW_ITEM_TYPE_UDP,
1522 RTE_FLOW_ITEM_TYPE_VXLAN,
1523 RTE_FLOW_ITEM_TYPE_ETH,
1524 RTE_FLOW_ITEM_TYPE_END,
1527 static enum rte_flow_item_type pattern_vxlan_2[] = {
1528 RTE_FLOW_ITEM_TYPE_ETH,
1529 RTE_FLOW_ITEM_TYPE_IPV6,
1530 RTE_FLOW_ITEM_TYPE_UDP,
1531 RTE_FLOW_ITEM_TYPE_VXLAN,
1532 RTE_FLOW_ITEM_TYPE_ETH,
1533 RTE_FLOW_ITEM_TYPE_END,
1536 static enum rte_flow_item_type pattern_vxlan_3[] = {
1537 RTE_FLOW_ITEM_TYPE_ETH,
1538 RTE_FLOW_ITEM_TYPE_IPV4,
1539 RTE_FLOW_ITEM_TYPE_UDP,
1540 RTE_FLOW_ITEM_TYPE_VXLAN,
1541 RTE_FLOW_ITEM_TYPE_ETH,
1542 RTE_FLOW_ITEM_TYPE_VLAN,
1543 RTE_FLOW_ITEM_TYPE_END,
1546 static enum rte_flow_item_type pattern_vxlan_4[] = {
1547 RTE_FLOW_ITEM_TYPE_ETH,
1548 RTE_FLOW_ITEM_TYPE_IPV6,
1549 RTE_FLOW_ITEM_TYPE_UDP,
1550 RTE_FLOW_ITEM_TYPE_VXLAN,
1551 RTE_FLOW_ITEM_TYPE_ETH,
1552 RTE_FLOW_ITEM_TYPE_VLAN,
1553 RTE_FLOW_ITEM_TYPE_END,
1556 static enum rte_flow_item_type pattern_nvgre_1[] = {
1557 RTE_FLOW_ITEM_TYPE_ETH,
1558 RTE_FLOW_ITEM_TYPE_IPV4,
1559 RTE_FLOW_ITEM_TYPE_NVGRE,
1560 RTE_FLOW_ITEM_TYPE_ETH,
1561 RTE_FLOW_ITEM_TYPE_END,
1564 static enum rte_flow_item_type pattern_nvgre_2[] = {
1565 RTE_FLOW_ITEM_TYPE_ETH,
1566 RTE_FLOW_ITEM_TYPE_IPV6,
1567 RTE_FLOW_ITEM_TYPE_NVGRE,
1568 RTE_FLOW_ITEM_TYPE_ETH,
1569 RTE_FLOW_ITEM_TYPE_END,
1572 static enum rte_flow_item_type pattern_nvgre_3[] = {
1573 RTE_FLOW_ITEM_TYPE_ETH,
1574 RTE_FLOW_ITEM_TYPE_IPV4,
1575 RTE_FLOW_ITEM_TYPE_NVGRE,
1576 RTE_FLOW_ITEM_TYPE_ETH,
1577 RTE_FLOW_ITEM_TYPE_VLAN,
1578 RTE_FLOW_ITEM_TYPE_END,
1581 static enum rte_flow_item_type pattern_nvgre_4[] = {
1582 RTE_FLOW_ITEM_TYPE_ETH,
1583 RTE_FLOW_ITEM_TYPE_IPV6,
1584 RTE_FLOW_ITEM_TYPE_NVGRE,
1585 RTE_FLOW_ITEM_TYPE_ETH,
1586 RTE_FLOW_ITEM_TYPE_VLAN,
1587 RTE_FLOW_ITEM_TYPE_END,
1590 static enum rte_flow_item_type pattern_mpls_1[] = {
1591 RTE_FLOW_ITEM_TYPE_ETH,
1592 RTE_FLOW_ITEM_TYPE_IPV4,
1593 RTE_FLOW_ITEM_TYPE_UDP,
1594 RTE_FLOW_ITEM_TYPE_MPLS,
1595 RTE_FLOW_ITEM_TYPE_END,
1598 static enum rte_flow_item_type pattern_mpls_2[] = {
1599 RTE_FLOW_ITEM_TYPE_ETH,
1600 RTE_FLOW_ITEM_TYPE_IPV6,
1601 RTE_FLOW_ITEM_TYPE_UDP,
1602 RTE_FLOW_ITEM_TYPE_MPLS,
1603 RTE_FLOW_ITEM_TYPE_END,
1606 static enum rte_flow_item_type pattern_mpls_3[] = {
1607 RTE_FLOW_ITEM_TYPE_ETH,
1608 RTE_FLOW_ITEM_TYPE_IPV4,
1609 RTE_FLOW_ITEM_TYPE_GRE,
1610 RTE_FLOW_ITEM_TYPE_MPLS,
1611 RTE_FLOW_ITEM_TYPE_END,
1614 static enum rte_flow_item_type pattern_mpls_4[] = {
1615 RTE_FLOW_ITEM_TYPE_ETH,
1616 RTE_FLOW_ITEM_TYPE_IPV6,
1617 RTE_FLOW_ITEM_TYPE_GRE,
1618 RTE_FLOW_ITEM_TYPE_MPLS,
1619 RTE_FLOW_ITEM_TYPE_END,
1622 static enum rte_flow_item_type pattern_qinq_1[] = {
1623 RTE_FLOW_ITEM_TYPE_ETH,
1624 RTE_FLOW_ITEM_TYPE_VLAN,
1625 RTE_FLOW_ITEM_TYPE_VLAN,
1626 RTE_FLOW_ITEM_TYPE_END,
1629 static enum rte_flow_item_type pattern_fdir_ipv4_l2tpv3oip[] = {
1630 RTE_FLOW_ITEM_TYPE_ETH,
1631 RTE_FLOW_ITEM_TYPE_IPV4,
1632 RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1633 RTE_FLOW_ITEM_TYPE_END,
1636 static enum rte_flow_item_type pattern_fdir_ipv6_l2tpv3oip[] = {
1637 RTE_FLOW_ITEM_TYPE_ETH,
1638 RTE_FLOW_ITEM_TYPE_IPV6,
1639 RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
1640 RTE_FLOW_ITEM_TYPE_END,
1643 static enum rte_flow_item_type pattern_fdir_ipv4_esp[] = {
1644 RTE_FLOW_ITEM_TYPE_ETH,
1645 RTE_FLOW_ITEM_TYPE_IPV4,
1646 RTE_FLOW_ITEM_TYPE_ESP,
1647 RTE_FLOW_ITEM_TYPE_END,
1650 static enum rte_flow_item_type pattern_fdir_ipv6_esp[] = {
1651 RTE_FLOW_ITEM_TYPE_ETH,
1652 RTE_FLOW_ITEM_TYPE_IPV6,
1653 RTE_FLOW_ITEM_TYPE_ESP,
1654 RTE_FLOW_ITEM_TYPE_END,
1657 static enum rte_flow_item_type pattern_fdir_ipv4_udp_esp[] = {
1658 RTE_FLOW_ITEM_TYPE_ETH,
1659 RTE_FLOW_ITEM_TYPE_IPV4,
1660 RTE_FLOW_ITEM_TYPE_UDP,
1661 RTE_FLOW_ITEM_TYPE_ESP,
1662 RTE_FLOW_ITEM_TYPE_END,
1665 static enum rte_flow_item_type pattern_fdir_ipv6_udp_esp[] = {
1666 RTE_FLOW_ITEM_TYPE_ETH,
1667 RTE_FLOW_ITEM_TYPE_IPV6,
1668 RTE_FLOW_ITEM_TYPE_UDP,
1669 RTE_FLOW_ITEM_TYPE_ESP,
1670 RTE_FLOW_ITEM_TYPE_END,
1673 static struct i40e_valid_pattern i40e_supported_patterns[] = {
1675 { pattern_ethertype, i40e_flow_parse_ethertype_filter },
1676 /* FDIR - support default flow type without flexible payload*/
1677 { pattern_ethertype, i40e_flow_parse_fdir_filter },
1678 { pattern_fdir_ipv4, i40e_flow_parse_fdir_filter },
1679 { pattern_fdir_ipv4_udp, i40e_flow_parse_fdir_filter },
1680 { pattern_fdir_ipv4_tcp, i40e_flow_parse_fdir_filter },
1681 { pattern_fdir_ipv4_sctp, i40e_flow_parse_fdir_filter },
1682 { pattern_fdir_ipv4_gtpc, i40e_flow_parse_fdir_filter },
1683 { pattern_fdir_ipv4_gtpu, i40e_flow_parse_fdir_filter },
1684 { pattern_fdir_ipv4_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1685 { pattern_fdir_ipv4_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1686 { pattern_fdir_ipv4_esp, i40e_flow_parse_fdir_filter },
1687 { pattern_fdir_ipv4_udp_esp, i40e_flow_parse_fdir_filter },
1688 { pattern_fdir_ipv6, i40e_flow_parse_fdir_filter },
1689 { pattern_fdir_ipv6_udp, i40e_flow_parse_fdir_filter },
1690 { pattern_fdir_ipv6_tcp, i40e_flow_parse_fdir_filter },
1691 { pattern_fdir_ipv6_sctp, i40e_flow_parse_fdir_filter },
1692 { pattern_fdir_ipv6_gtpc, i40e_flow_parse_fdir_filter },
1693 { pattern_fdir_ipv6_gtpu, i40e_flow_parse_fdir_filter },
1694 { pattern_fdir_ipv6_gtpu_ipv4, i40e_flow_parse_fdir_filter },
1695 { pattern_fdir_ipv6_gtpu_ipv6, i40e_flow_parse_fdir_filter },
1696 { pattern_fdir_ipv6_esp, i40e_flow_parse_fdir_filter },
1697 { pattern_fdir_ipv6_udp_esp, i40e_flow_parse_fdir_filter },
1698 /* FDIR - support default flow type with flexible payload */
1699 { pattern_fdir_ethertype_raw_1, i40e_flow_parse_fdir_filter },
1700 { pattern_fdir_ethertype_raw_2, i40e_flow_parse_fdir_filter },
1701 { pattern_fdir_ethertype_raw_3, i40e_flow_parse_fdir_filter },
1702 { pattern_fdir_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1703 { pattern_fdir_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1704 { pattern_fdir_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1705 { pattern_fdir_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1706 { pattern_fdir_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1707 { pattern_fdir_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1708 { pattern_fdir_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1709 { pattern_fdir_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1710 { pattern_fdir_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1711 { pattern_fdir_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1712 { pattern_fdir_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1713 { pattern_fdir_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1714 { pattern_fdir_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1715 { pattern_fdir_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1716 { pattern_fdir_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1717 { pattern_fdir_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1718 { pattern_fdir_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1719 { pattern_fdir_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1720 { pattern_fdir_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1721 { pattern_fdir_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1722 { pattern_fdir_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1723 { pattern_fdir_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1724 { pattern_fdir_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1725 { pattern_fdir_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1726 /* FDIR - support single vlan input set */
1727 { pattern_fdir_ethertype_vlan, i40e_flow_parse_fdir_filter },
1728 { pattern_fdir_vlan_ipv4, i40e_flow_parse_fdir_filter },
1729 { pattern_fdir_vlan_ipv4_udp, i40e_flow_parse_fdir_filter },
1730 { pattern_fdir_vlan_ipv4_tcp, i40e_flow_parse_fdir_filter },
1731 { pattern_fdir_vlan_ipv4_sctp, i40e_flow_parse_fdir_filter },
1732 { pattern_fdir_vlan_ipv6, i40e_flow_parse_fdir_filter },
1733 { pattern_fdir_vlan_ipv6_udp, i40e_flow_parse_fdir_filter },
1734 { pattern_fdir_vlan_ipv6_tcp, i40e_flow_parse_fdir_filter },
1735 { pattern_fdir_vlan_ipv6_sctp, i40e_flow_parse_fdir_filter },
1736 { pattern_fdir_ethertype_vlan_raw_1, i40e_flow_parse_fdir_filter },
1737 { pattern_fdir_ethertype_vlan_raw_2, i40e_flow_parse_fdir_filter },
1738 { pattern_fdir_ethertype_vlan_raw_3, i40e_flow_parse_fdir_filter },
1739 { pattern_fdir_vlan_ipv4_raw_1, i40e_flow_parse_fdir_filter },
1740 { pattern_fdir_vlan_ipv4_raw_2, i40e_flow_parse_fdir_filter },
1741 { pattern_fdir_vlan_ipv4_raw_3, i40e_flow_parse_fdir_filter },
1742 { pattern_fdir_vlan_ipv4_udp_raw_1, i40e_flow_parse_fdir_filter },
1743 { pattern_fdir_vlan_ipv4_udp_raw_2, i40e_flow_parse_fdir_filter },
1744 { pattern_fdir_vlan_ipv4_udp_raw_3, i40e_flow_parse_fdir_filter },
1745 { pattern_fdir_vlan_ipv4_tcp_raw_1, i40e_flow_parse_fdir_filter },
1746 { pattern_fdir_vlan_ipv4_tcp_raw_2, i40e_flow_parse_fdir_filter },
1747 { pattern_fdir_vlan_ipv4_tcp_raw_3, i40e_flow_parse_fdir_filter },
1748 { pattern_fdir_vlan_ipv4_sctp_raw_1, i40e_flow_parse_fdir_filter },
1749 { pattern_fdir_vlan_ipv4_sctp_raw_2, i40e_flow_parse_fdir_filter },
1750 { pattern_fdir_vlan_ipv4_sctp_raw_3, i40e_flow_parse_fdir_filter },
1751 { pattern_fdir_vlan_ipv6_raw_1, i40e_flow_parse_fdir_filter },
1752 { pattern_fdir_vlan_ipv6_raw_2, i40e_flow_parse_fdir_filter },
1753 { pattern_fdir_vlan_ipv6_raw_3, i40e_flow_parse_fdir_filter },
1754 { pattern_fdir_vlan_ipv6_udp_raw_1, i40e_flow_parse_fdir_filter },
1755 { pattern_fdir_vlan_ipv6_udp_raw_2, i40e_flow_parse_fdir_filter },
1756 { pattern_fdir_vlan_ipv6_udp_raw_3, i40e_flow_parse_fdir_filter },
1757 { pattern_fdir_vlan_ipv6_tcp_raw_1, i40e_flow_parse_fdir_filter },
1758 { pattern_fdir_vlan_ipv6_tcp_raw_2, i40e_flow_parse_fdir_filter },
1759 { pattern_fdir_vlan_ipv6_tcp_raw_3, i40e_flow_parse_fdir_filter },
1760 { pattern_fdir_vlan_ipv6_sctp_raw_1, i40e_flow_parse_fdir_filter },
1761 { pattern_fdir_vlan_ipv6_sctp_raw_2, i40e_flow_parse_fdir_filter },
1762 { pattern_fdir_vlan_ipv6_sctp_raw_3, i40e_flow_parse_fdir_filter },
1763 /* FDIR - support VF item */
1764 { pattern_fdir_ipv4_vf, i40e_flow_parse_fdir_filter },
1765 { pattern_fdir_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1766 { pattern_fdir_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1767 { pattern_fdir_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1768 { pattern_fdir_ipv6_vf, i40e_flow_parse_fdir_filter },
1769 { pattern_fdir_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1770 { pattern_fdir_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1771 { pattern_fdir_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1772 { pattern_fdir_ethertype_raw_1_vf, i40e_flow_parse_fdir_filter },
1773 { pattern_fdir_ethertype_raw_2_vf, i40e_flow_parse_fdir_filter },
1774 { pattern_fdir_ethertype_raw_3_vf, i40e_flow_parse_fdir_filter },
1775 { pattern_fdir_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1776 { pattern_fdir_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1777 { pattern_fdir_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1778 { pattern_fdir_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1779 { pattern_fdir_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1780 { pattern_fdir_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1781 { pattern_fdir_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1782 { pattern_fdir_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1783 { pattern_fdir_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1784 { pattern_fdir_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1785 { pattern_fdir_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1786 { pattern_fdir_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1787 { pattern_fdir_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1788 { pattern_fdir_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1789 { pattern_fdir_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1790 { pattern_fdir_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1791 { pattern_fdir_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1792 { pattern_fdir_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1793 { pattern_fdir_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1794 { pattern_fdir_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1795 { pattern_fdir_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1796 { pattern_fdir_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1797 { pattern_fdir_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1798 { pattern_fdir_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1799 { pattern_fdir_ethertype_vlan_vf, i40e_flow_parse_fdir_filter },
1800 { pattern_fdir_vlan_ipv4_vf, i40e_flow_parse_fdir_filter },
1801 { pattern_fdir_vlan_ipv4_udp_vf, i40e_flow_parse_fdir_filter },
1802 { pattern_fdir_vlan_ipv4_tcp_vf, i40e_flow_parse_fdir_filter },
1803 { pattern_fdir_vlan_ipv4_sctp_vf, i40e_flow_parse_fdir_filter },
1804 { pattern_fdir_vlan_ipv6_vf, i40e_flow_parse_fdir_filter },
1805 { pattern_fdir_vlan_ipv6_udp_vf, i40e_flow_parse_fdir_filter },
1806 { pattern_fdir_vlan_ipv6_tcp_vf, i40e_flow_parse_fdir_filter },
1807 { pattern_fdir_vlan_ipv6_sctp_vf, i40e_flow_parse_fdir_filter },
1808 { pattern_fdir_ethertype_vlan_raw_1_vf, i40e_flow_parse_fdir_filter },
1809 { pattern_fdir_ethertype_vlan_raw_2_vf, i40e_flow_parse_fdir_filter },
1810 { pattern_fdir_ethertype_vlan_raw_3_vf, i40e_flow_parse_fdir_filter },
1811 { pattern_fdir_vlan_ipv4_raw_1_vf, i40e_flow_parse_fdir_filter },
1812 { pattern_fdir_vlan_ipv4_raw_2_vf, i40e_flow_parse_fdir_filter },
1813 { pattern_fdir_vlan_ipv4_raw_3_vf, i40e_flow_parse_fdir_filter },
1814 { pattern_fdir_vlan_ipv4_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1815 { pattern_fdir_vlan_ipv4_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1816 { pattern_fdir_vlan_ipv4_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1817 { pattern_fdir_vlan_ipv4_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1818 { pattern_fdir_vlan_ipv4_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1819 { pattern_fdir_vlan_ipv4_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1820 { pattern_fdir_vlan_ipv4_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1821 { pattern_fdir_vlan_ipv4_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1822 { pattern_fdir_vlan_ipv4_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1823 { pattern_fdir_vlan_ipv6_raw_1_vf, i40e_flow_parse_fdir_filter },
1824 { pattern_fdir_vlan_ipv6_raw_2_vf, i40e_flow_parse_fdir_filter },
1825 { pattern_fdir_vlan_ipv6_raw_3_vf, i40e_flow_parse_fdir_filter },
1826 { pattern_fdir_vlan_ipv6_udp_raw_1_vf, i40e_flow_parse_fdir_filter },
1827 { pattern_fdir_vlan_ipv6_udp_raw_2_vf, i40e_flow_parse_fdir_filter },
1828 { pattern_fdir_vlan_ipv6_udp_raw_3_vf, i40e_flow_parse_fdir_filter },
1829 { pattern_fdir_vlan_ipv6_tcp_raw_1_vf, i40e_flow_parse_fdir_filter },
1830 { pattern_fdir_vlan_ipv6_tcp_raw_2_vf, i40e_flow_parse_fdir_filter },
1831 { pattern_fdir_vlan_ipv6_tcp_raw_3_vf, i40e_flow_parse_fdir_filter },
1832 { pattern_fdir_vlan_ipv6_sctp_raw_1_vf, i40e_flow_parse_fdir_filter },
1833 { pattern_fdir_vlan_ipv6_sctp_raw_2_vf, i40e_flow_parse_fdir_filter },
1834 { pattern_fdir_vlan_ipv6_sctp_raw_3_vf, i40e_flow_parse_fdir_filter },
1836 { pattern_vxlan_1, i40e_flow_parse_vxlan_filter },
1837 { pattern_vxlan_2, i40e_flow_parse_vxlan_filter },
1838 { pattern_vxlan_3, i40e_flow_parse_vxlan_filter },
1839 { pattern_vxlan_4, i40e_flow_parse_vxlan_filter },
1841 { pattern_nvgre_1, i40e_flow_parse_nvgre_filter },
1842 { pattern_nvgre_2, i40e_flow_parse_nvgre_filter },
1843 { pattern_nvgre_3, i40e_flow_parse_nvgre_filter },
1844 { pattern_nvgre_4, i40e_flow_parse_nvgre_filter },
1845 /* MPLSoUDP & MPLSoGRE */
1846 { pattern_mpls_1, i40e_flow_parse_mpls_filter },
1847 { pattern_mpls_2, i40e_flow_parse_mpls_filter },
1848 { pattern_mpls_3, i40e_flow_parse_mpls_filter },
1849 { pattern_mpls_4, i40e_flow_parse_mpls_filter },
1851 { pattern_fdir_ipv4_gtpc, i40e_flow_parse_gtp_filter },
1852 { pattern_fdir_ipv4_gtpu, i40e_flow_parse_gtp_filter },
1853 { pattern_fdir_ipv6_gtpc, i40e_flow_parse_gtp_filter },
1854 { pattern_fdir_ipv6_gtpu, i40e_flow_parse_gtp_filter },
1856 { pattern_qinq_1, i40e_flow_parse_qinq_filter },
1857 /* L2TPv3 over IP */
1858 { pattern_fdir_ipv4_l2tpv3oip, i40e_flow_parse_fdir_filter },
1859 { pattern_fdir_ipv6_l2tpv3oip, i40e_flow_parse_fdir_filter },
1861 { pattern_fdir_ipv4_udp, i40e_flow_parse_l4_cloud_filter },
1862 { pattern_fdir_ipv4_tcp, i40e_flow_parse_l4_cloud_filter },
1863 { pattern_fdir_ipv4_sctp, i40e_flow_parse_l4_cloud_filter },
1864 { pattern_fdir_ipv6_udp, i40e_flow_parse_l4_cloud_filter },
1865 { pattern_fdir_ipv6_tcp, i40e_flow_parse_l4_cloud_filter },
1866 { pattern_fdir_ipv6_sctp, i40e_flow_parse_l4_cloud_filter },
/*
 * Advance 'act' to the first non-VOID action at or after actions[index].
 * NOTE(review): this extract is missing interior lines of the macro
 * (e.g. the index increment and closing braces of the do/while wrapper);
 * confirm the exact expansion against the full source.
 */
1869 #define NEXT_ITEM_OF_ACTION(act, actions, index) \
1871 act = actions + index; \
1872 while (act->type == RTE_FLOW_ACTION_TYPE_VOID) { \
1874 act = actions + index; \
1878 /* Find the first VOID or non-VOID item pointer */
/*
 * Scan 'item' forward until END. When is_void is true the scan looks for
 * the first VOID item; when false, for the first non-VOID item.
 * NOTE(review): interior lines (declaration of is_find, break/advance,
 * return) are missing from this extract -- confirm against full source.
 */
1879 static const struct rte_flow_item *
1880 i40e_find_first_item(const struct rte_flow_item *item, bool is_void)
1884 while (item->type != RTE_FLOW_ITEM_TYPE_END) {
/* searching for a VOID item */
1886 is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
/* searching for a non-VOID item */
1888 is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
1896 /* Skip all VOID items of the pattern */
/*
 * Compact 'pattern' into 'items' by copying only the runs of non-VOID
 * items, delimited with i40e_find_first_item(), and finishing with the
 * END item. NOTE(review): loop-control lines are missing from this
 * extract -- confirm the iteration/termination details in full source.
 */
1898 i40e_pattern_skip_void_item(struct rte_flow_item *items,
1899 const struct rte_flow_item *pattern)
1901 uint32_t cpy_count = 0;
1902 const struct rte_flow_item *pb = pattern, *pe = pattern;
1905 /* Find a non-void item first */
1906 pb = i40e_find_first_item(pb, false);
1907 if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
1912 /* Find a void item */
1913 pe = i40e_find_first_item(pb + 1, true);
/* copy the contiguous non-VOID run [pb, pe) to the output */
1915 cpy_count = pe - pb;
1916 rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
1920 if (pe->type == RTE_FLOW_ITEM_TYPE_END) {
1927 /* Copy the END item. */
1928 rte_memcpy(items, pe, sizeof(struct rte_flow_item));
1931 /* Check if the pattern matches a supported item type array */
/*
 * Walk 'item_array' and 'pattern' in lockstep; a match requires both to
 * reach END at the same position (same types in the same order).
 */
1933 i40e_match_pattern(enum rte_flow_item_type *item_array,
1934 struct rte_flow_item *pattern)
1936 struct rte_flow_item *item = pattern;
1938 while ((*item_array == item->type) &&
1939 (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
/* matched only if both sequences ended together */
1944 return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
1945 item->type == RTE_FLOW_ITEM_TYPE_END);
1948 /* Find if there's parse filter function matched */
/*
 * Linear search of i40e_supported_patterns for the first entry whose item
 * sequence matches 'pattern'; returns its parse callback, or NULL if none
 * matches. 'idx' presumably carries the starting/matched table index --
 * the lines that use it are missing from this extract; confirm in source.
 */
1949 static parse_filter_t
1950 i40e_find_parse_filter_func(struct rte_flow_item *pattern, uint32_t *idx)
1952 parse_filter_t parse_filter = NULL;
1955 for (; i < RTE_DIM(i40e_supported_patterns); i++) {
1956 if (i40e_match_pattern(i40e_supported_patterns[i].items,
1958 parse_filter = i40e_supported_patterns[i].parse_filter;
1965 return parse_filter;
1968 /* Parse attributes */
/*
 * Validate the rte_flow attributes: only ingress is accepted; egress,
 * a non-zero priority and a non-zero group are all rejected with EINVAL
 * via rte_flow_error_set().
 */
1970 i40e_flow_parse_attr(const struct rte_flow_attr *attr,
1971 struct rte_flow_error *error)
1973 /* Must be input direction */
1974 if (!attr->ingress) {
1975 rte_flow_error_set(error, EINVAL,
1976 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1977 attr, "Only support ingress.");
/* Egress is not supported. */
1983 rte_flow_error_set(error, EINVAL,
1984 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1985 attr, "Not support egress.");
/* Flow priorities are not supported. */
1990 if (attr->priority) {
1991 rte_flow_error_set(error, EINVAL,
1992 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1993 attr, "Not support priority.")
/* Flow groups are not supported. */
1999 rte_flow_error_set(error, EINVAL,
2000 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2001 attr, "Not support group.");
/*
 * Read the outer VLAN TPID configured in hardware: queries the
 * I40E_GL_SWT_L2TAGCTRL register via the admin queue and extracts the
 * 16-bit ethertype field. 'qinq' reflects whether VLAN-extend (QinQ) RX
 * offload is enabled; the lines selecting reg_id from it are missing
 * from this extract -- confirm in full source.
 */
2009 i40e_get_outer_vlan(struct rte_eth_dev *dev)
2011 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
2012 int qinq = dev->data->dev_conf.rxmode.offloads &
2013 DEV_RX_OFFLOAD_VLAN_EXTEND;
2023 i40e_aq_debug_read_register(hw, I40E_GL_SWT_L2TAGCTRL(reg_id),
/* low 16 bits of the L2TAGCTRL register hold the TPID */
2026 tpid = (reg_r >> I40E_GL_SWT_L2TAGCTRL_ETHERTYPE_SHIFT) & 0xFFFF;
2031 /* 1. Last in item should be NULL as range is not supported.
2032 * 2. Supported filter types: MAC_ETHTYPE and ETHTYPE.
2033 * 3. SRC mac_addr mask should be 00:00:00:00:00:00.
2034 * 4. DST mac_addr mask should be 00:00:00:00:00:00 or
2036 * 5. Ether_type mask should be 0xFFFF.
/*
 * Parse an ethertype flow pattern into an rte_eth_ethertype_filter.
 * Rejects ranged items, missing spec/mask, partial MAC or ethertype
 * masks, and ethertypes the hardware handles elsewhere (IPv4, IPv6,
 * LLDP, and the configured outer TPID).
 */
2039 i40e_flow_parse_ethertype_pattern(struct rte_eth_dev *dev,
2040 const struct rte_flow_item *pattern,
2041 struct rte_flow_error *error,
2042 struct rte_eth_ethertype_filter *filter)
2044 const struct rte_flow_item *item = pattern;
2045 const struct rte_flow_item_eth *eth_spec;
2046 const struct rte_flow_item_eth *eth_mask;
2047 enum rte_flow_item_type item_type;
2048 uint16_t outer_tpid;
2050 outer_tpid = i40e_get_outer_vlan(dev);
2052 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* ranged matches ('last') are not supported */
2054 rte_flow_error_set(error, EINVAL,
2055 RTE_FLOW_ERROR_TYPE_ITEM,
2057 "Not support range")
2060 item_type = item->type;
2061 switch (item_type) {
2062 case RTE_FLOW_ITEM_TYPE_ETH:
2063 eth_spec = item->spec;
2064 eth_mask = item->mask;
2065 /* Get the MAC info. */
2066 if (!eth_spec || !eth_mask) {
2067 rte_flow_error_set(error, EINVAL,
2068 RTE_FLOW_ERROR_TYPE_ITEM,
2070 "NULL ETH spec/mask")
2074 /* Mask bits of source MAC address must be full of 0.
2075 * Mask bits of destination MAC address must be full
2076 * of 1 or full of 0.
2078 if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2079 (!rte_is_zero_ether_addr(&eth_mask->dst) &&
2080 !rte_is_broadcast_ether_addr(&eth_mask->dst))) {
2081 rte_flow_error_set(error, EINVAL,
2082 RTE_FLOW_ERROR_TYPE_ITEM,
2084 "Invalid MAC_addr mask")
/* the ethertype must be matched exactly */
2088 if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
2089 rte_flow_error_set(error, EINVAL,
2090 RTE_FLOW_ERROR_TYPE_ITEM,
2092 "Invalid ethertype mask")
2096 /* If mask bits of destination MAC address
2097 * are full of 1, set RTE_ETHTYPE_FLAGS_MAC.
2099 if (rte_is_broadcast_ether_addr(&eth_mask->dst)) {
2100 filter->mac_addr = eth_spec->dst;
2101 filter->flags |= RTE_ETHTYPE_FLAGS_MAC;
2103 filter->flags &= ~RTE_ETHTYPE_FLAGS_MAC;
/* ethertype arrives big-endian on the wire */
2105 filter->ether_type = rte_be_to_cpu_16(eth_spec->type);
/* these ethertypes are claimed by other hardware filters */
2107 if (filter->ether_type == RTE_ETHER_TYPE_IPV4 ||
2108 filter->ether_type == RTE_ETHER_TYPE_IPV6 ||
2109 filter->ether_type == RTE_ETHER_TYPE_LLDP ||
2110 filter->ether_type == outer_tpid) {
2111 rte_flow_error_set(error, EINVAL,
2112 RTE_FLOW_ERROR_TYPE_ITEM,
2114 "Unsupported ether_type in"
2115 " control packet filter.");
2127 /* Ethertype action only supports QUEUE or DROP. */
/* Parse the action list of an ethertype rule.
 * Accepts exactly one QUEUE or DROP action followed by END; anything else
 * sets 'error' with EINVAL. For QUEUE, the queue index is validated against
 * the number of configured Rx queues.
 */
2129 i40e_flow_parse_ethertype_action(struct rte_eth_dev *dev,
2130 const struct rte_flow_action *actions,
2131 struct rte_flow_error *error,
2132 struct rte_eth_ethertype_filter *filter)
2134 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2135 const struct rte_flow_action *act;
2136 const struct rte_flow_action_queue *act_q;
2139 /* Check if the first non-void action is QUEUE or DROP. */
2140 NEXT_ITEM_OF_ACTION(act, actions, index);
2141 if (act->type != RTE_FLOW_ACTION_TYPE_QUEUE &&
2142 act->type != RTE_FLOW_ACTION_TYPE_DROP) {
2143 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2144 act, "Not supported action.");
2148 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
2150 filter->queue = act_q->index;
/* Reject queue indices beyond what the port has configured. */
2151 if (filter->queue >= pf->dev_data->nb_rx_queues) {
2152 rte_flow_error_set(error, EINVAL,
2153 RTE_FLOW_ERROR_TYPE_ACTION,
2154 act, "Invalid queue ID for"
2155 " ethertype_filter.");
2159 filter->flags |= RTE_ETHTYPE_FLAGS_DROP;
2162 /* Check if the next non-void item is END */
2164 NEXT_ITEM_OF_ACTION(act, actions, index);
2165 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
2166 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
2167 act, "Not supported action.");
/* Top-level parser for an ethertype rule: parses pattern, then actions,
 * then attributes, writing the result into filter->ethertype_filter and
 * recording the consumed filter type in the file-scope 'cons_filter_type'.
 */
2175 i40e_flow_parse_ethertype_filter(struct rte_eth_dev *dev,
2176 const struct rte_flow_attr *attr,
2177 const struct rte_flow_item pattern[],
2178 const struct rte_flow_action actions[],
2179 struct rte_flow_error *error,
2180 union i40e_filter_t *filter)
2182 struct rte_eth_ethertype_filter *ethertype_filter =
2183 &filter->ethertype_filter;
2186 ret = i40e_flow_parse_ethertype_pattern(dev, pattern, error,
2191 ret = i40e_flow_parse_ethertype_action(dev, actions, error,
2196 ret = i40e_flow_parse_attr(attr, error);
/* Remember which filter type this rule consumed for later teardown. */
2200 cons_filter_type = RTE_ETH_FILTER_ETHERTYPE;
/* Validate a RAW (flexible payload) pattern item.
 * Requires: relative addressing, an even non-negative offset (the flex
 * payload hardware works in 16-bit words), and no search/limit usage.
 * Sets 'error' with EINVAL on any violation.
 */
2206 i40e_flow_check_raw_item(const struct rte_flow_item *item,
2207 const struct rte_flow_item_raw *raw_spec,
2208 struct rte_flow_error *error)
2210 if (!raw_spec->relative) {
2211 rte_flow_error_set(error, EINVAL,
2212 RTE_FLOW_ERROR_TYPE_ITEM,
2214 "Relative should be 1.");
/* Offsets are consumed in 16-bit word units, hence must be even. */
2218 if (raw_spec->offset % sizeof(uint16_t)) {
2219 rte_flow_error_set(error, EINVAL,
2220 RTE_FLOW_ERROR_TYPE_ITEM,
2222 "Offset should be even.");
2226 if (raw_spec->search || raw_spec->limit) {
2227 rte_flow_error_set(error, EINVAL,
2228 RTE_FLOW_ERROR_TYPE_ITEM,
2230 "search or limit is not supported.");
2234 if (raw_spec->offset < 0) {
2235 rte_flow_error_set(error, EINVAL,
2236 RTE_FLOW_ERROR_TYPE_ITEM,
2238 "Offset should be non-negative.");
/* Record one flex payload PIT (payload-in-transit) entry in SW state.
 * Distinguishes three cases for (layer_idx, raw_id): conflicting with an
 * already-programmed entry, identical to an existing one, or new — in which
 * case the entry is stored in pf->fdir.flex_set. Return codes for the first
 * two cases are on lines elided from this view.
 */
2245 i40e_flow_store_flex_pit(struct i40e_pf *pf,
2246 struct i40e_fdir_flex_pit *flex_pit,
2247 enum i40e_flxpld_layer_idx layer_idx,
/* Flat index: I40E_MAX_FLXPLD_FIED entries per payload layer. */
2252 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + raw_id;
2253 /* Check if the configuration is conflicted */
2254 if (pf->fdir.flex_pit_flag[layer_idx] &&
2255 (pf->fdir.flex_set[field_idx].src_offset != flex_pit->src_offset ||
2256 pf->fdir.flex_set[field_idx].size != flex_pit->size ||
2257 pf->fdir.flex_set[field_idx].dst_offset != flex_pit->dst_offset))
2260 /* Check if the configuration exists. */
2261 if (pf->fdir.flex_pit_flag[layer_idx] &&
2262 (pf->fdir.flex_set[field_idx].src_offset == flex_pit->src_offset &&
2263 pf->fdir.flex_set[field_idx].size == flex_pit->size &&
2264 pf->fdir.flex_set[field_idx].dst_offset == flex_pit->dst_offset))
/* New configuration: store it for later HW programming. */
2267 pf->fdir.flex_set[field_idx].src_offset =
2268 flex_pit->src_offset;
2269 pf->fdir.flex_set[field_idx].size =
2271 pf->fdir.flex_set[field_idx].dst_offset =
2272 flex_pit->dst_offset;
/* Build a per-pctype flex mask from the raw byte mask and store it in SW.
 * The byte mask is folded into 16-bit words: fully-masked words set a bit in
 * word_mask; partially-masked words get a bitmask entry (inverted mask +
 * word offset), capped at I40E_FDIR_BITMASK_NUM_WORD. Conflicts with an
 * already-stored mask for the same pctype are detected via memcmp.
 */
2278 i40e_flow_store_flex_mask(struct i40e_pf *pf,
2279 enum i40e_filter_pctype pctype,
2282 struct i40e_fdir_flex_mask flex_mask;
2284 uint8_t i, nb_bitmask = 0;
2286 memset(&flex_mask, 0, sizeof(struct i40e_fdir_flex_mask));
2287 for (i = 0; i < I40E_FDIR_MAX_FLEX_LEN; i += sizeof(uint16_t)) {
/* Combine two mask bytes into one big-endian-ordered word. */
2288 mask_tmp = I40E_WORD(mask[i], mask[i + 1]);
2290 flex_mask.word_mask |=
2291 I40E_FLEX_WORD_MASK(i / sizeof(uint16_t));
2292 if (mask_tmp != UINT16_MAX) {
/* HW expects the inverted mask for partially-masked words. */
2293 flex_mask.bitmask[nb_bitmask].mask = ~mask_tmp;
2294 flex_mask.bitmask[nb_bitmask].offset =
2295 i / sizeof(uint16_t);
2297 if (nb_bitmask > I40E_FDIR_BITMASK_NUM_WORD)
2302 flex_mask.nb_bitmask = nb_bitmask;
/* Conflict: a different mask is already stored for this pctype. */
2304 if (pf->fdir.flex_mask_flag[pctype] &&
2305 (memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2306 sizeof(struct i40e_fdir_flex_mask))))
2308 else if (pf->fdir.flex_mask_flag[pctype] &&
2309 !(memcmp(&flex_mask, &pf->fdir.flex_mask[pctype],
2310 sizeof(struct i40e_fdir_flex_mask))))
2313 memcpy(&pf->fdir.flex_mask[pctype], &flex_mask,
2314 sizeof(struct i40e_fdir_flex_mask));
/* Program the stored flex PIT entries for one payload layer into HW.
 * Writes the GLQF_ORT override, one PRTQF_FLX_PIT register per used field,
 * then fills the unused field registers with non-use values that respect
 * the register constraint (src offset must not go backwards), and finally
 * marks the layer as programmed.
 */
2319 i40e_flow_set_fdir_flex_pit(struct i40e_pf *pf,
2320 enum i40e_flxpld_layer_idx layer_idx,
2323 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2324 uint32_t flx_pit, flx_ort;
2326 uint16_t min_next_off = 0; /* in words */
2330 flx_ort = (1 << I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT) |
2331 (raw_id << I40E_GLQF_ORT_FIELD_CNT_SHIFT) |
2332 (layer_idx * I40E_MAX_FLXPLD_FIED);
/* 33 is the GLQF_ORT base index for flex payload entries. */
2333 I40E_WRITE_GLB_REG(hw, I40E_GLQF_ORT(33 + layer_idx), flx_ort);
2337 for (i = 0; i < raw_id; i++) {
2338 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2339 flx_pit = MK_FLX_PIT(pf->fdir.flex_set[field_idx].src_offset,
2340 pf->fdir.flex_set[field_idx].size,
2341 pf->fdir.flex_set[field_idx].dst_offset);
2343 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
/* Next unused entry must start at or after the end of this one. */
2344 min_next_off = pf->fdir.flex_set[field_idx].src_offset +
2345 pf->fdir.flex_set[field_idx].size;
2348 for (; i < I40E_MAX_FLXPLD_FIED; i++) {
2349 /* set the non-used register obeying register's constrain */
2350 field_idx = layer_idx * I40E_MAX_FLXPLD_FIED + i;
2351 flx_pit = MK_FLX_PIT(min_next_off, NONUSE_FLX_PIT_FSIZE,
2352 NONUSE_FLX_PIT_DEST_OFF);
2353 I40E_WRITE_REG(hw, I40E_PRTQF_FLX_PIT(field_idx), flx_pit);
2357 pf->fdir.flex_pit_flag[layer_idx] = 1;
/* Program the stored flex mask for one pctype into HW.
 * Writes the word-mask into PRTQF_FD_FLXINSET and one PRTQF_FD_MSK register
 * per partial-word bitmask entry, then marks the pctype's mask as set.
 */
2361 i40e_flow_set_fdir_flex_msk(struct i40e_pf *pf,
2362 enum i40e_filter_pctype pctype)
2364 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2365 struct i40e_fdir_flex_mask *flex_mask;
2366 uint32_t flxinset, fd_mask;
2370 flex_mask = &pf->fdir.flex_mask[pctype];
2371 flxinset = (flex_mask->word_mask <<
2372 I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) &
2373 I40E_PRTQF_FD_FLXINSET_INSET_MASK;
2374 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_FLXINSET(pctype), flxinset);
2376 for (i = 0; i < flex_mask->nb_bitmask; i++) {
2377 fd_mask = (flex_mask->bitmask[i].mask <<
2378 I40E_PRTQF_FD_MSK_MASK_SHIFT) &
2379 I40E_PRTQF_FD_MSK_MASK_MASK;
/* Offset is relative to the flex payload start in the field vector. */
2380 fd_mask |= ((flex_mask->bitmask[i].offset +
2381 I40E_FLX_OFFSET_IN_FIELD_VECTOR) <<
2382 I40E_PRTQF_FD_MSK_OFFSET_SHIFT) &
2383 I40E_PRTQF_FD_MSK_OFFSET_MASK;
2384 i40e_write_rx_ctl(hw, I40E_PRTQF_FD_MSK(pctype, i), fd_mask);
2387 pf->fdir.flex_mask_flag[pctype] = 1;
/* Validate and program the FDIR input set for one pctype.
 * Rejects invalid input sets and conflicts with an input set already
 * programmed for this pctype. In multi-driver mode the GLQF_FD_MSK
 * registers are read-only for this PMD, so the existing HW contents must
 * already match; otherwise the mask registers are (re)written here.
 * Finally writes the 64-bit inset into the two PRTQF_FD_INSET halves and
 * records the input set in SW.
 */
2391 i40e_flow_set_fdir_inset(struct i40e_pf *pf,
2392 enum i40e_filter_pctype pctype,
2395 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
2396 uint64_t inset_reg = 0;
2397 uint32_t mask_reg[I40E_INSET_MASK_NUM_REG] = {0};
2400 /* Check if the input set is valid */
2401 if (i40e_validate_input_set(pctype, RTE_ETH_FILTER_FDIR,
2403 PMD_DRV_LOG(ERR, "Invalid input set");
2407 /* Check if the configuration is conflicted */
2408 if (pf->fdir.inset_flag[pctype] &&
2409 memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2412 if (pf->fdir.inset_flag[pctype] &&
2413 !memcmp(&pf->fdir.input_set[pctype], &input_set, sizeof(uint64_t)))
2416 num = i40e_generate_inset_mask_reg(input_set, mask_reg,
2417 I40E_INSET_MASK_NUM_REG);
2421 if (pf->support_multi_driver) {
/* Global mask registers are shared: verify, never write, here. */
2422 for (i = 0; i < num; i++)
2423 if (i40e_read_rx_ctl(hw,
2424 I40E_GLQF_FD_MSK(i, pctype)) !=
2426 PMD_DRV_LOG(ERR, "Input set setting is not"
2428 " `support-multi-driver`"
2432 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2433 if (i40e_read_rx_ctl(hw,
2434 I40E_GLQF_FD_MSK(i, pctype)) != 0) {
2435 PMD_DRV_LOG(ERR, "Input set setting is not"
2437 " `support-multi-driver`"
2443 for (i = 0; i < num; i++)
2444 i40e_check_write_reg(hw, I40E_GLQF_FD_MSK(i, pctype),
2446 /*clear unused mask registers of the pctype */
2447 for (i = num; i < I40E_INSET_MASK_NUM_REG; i++)
2448 i40e_check_write_reg(hw,
2449 I40E_GLQF_FD_MSK(i, pctype), 0);
2452 inset_reg |= i40e_translate_input_set_reg(hw->mac.type, input_set);
/* 64-bit inset is split across two 32-bit INSET registers. */
2454 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 0),
2455 (uint32_t)(inset_reg & UINT32_MAX));
2456 i40e_check_write_reg(hw, I40E_PRTQF_FD_INSET(pctype, 1),
2457 (uint32_t)((inset_reg >>
2458 I40E_32_BIT_WIDTH) & UINT32_MAX));
2460 I40E_WRITE_FLUSH(hw);
2462 pf->fdir.input_set[pctype] = input_set;
2463 pf->fdir.inset_flag[pctype] = 1;
/* Map a customized pattern item type (GTP-C/GTP-U/L2TPv3oIP/ESP) plus the
 * inner/outer IP info already recorded in 'filter' to a customized pctype.
 * Returns the pctype if a matching, valid customized pctype profile exists;
 * I40E_FILTER_PCTYPE_INVALID otherwise.
 */
2468 i40e_flow_fdir_get_pctype_value(struct i40e_pf *pf,
2469 enum rte_flow_item_type item_type,
2470 struct i40e_fdir_filter_conf *filter)
2472 struct i40e_customized_pctype *cus_pctype = NULL;
2474 switch (item_type) {
2475 case RTE_FLOW_ITEM_TYPE_GTPC:
2476 cus_pctype = i40e_find_customized_pctype(pf,
2477 I40E_CUSTOMIZED_GTPC);
2479 case RTE_FLOW_ITEM_TYPE_GTPU:
/* Without an inner IP header, the plain GTPU pctype applies. */
2480 if (!filter->input.flow_ext.inner_ip)
2481 cus_pctype = i40e_find_customized_pctype(pf,
2482 I40E_CUSTOMIZED_GTPU);
2483 else if (filter->input.flow_ext.iip_type ==
2484 I40E_FDIR_IPTYPE_IPV4)
2485 cus_pctype = i40e_find_customized_pctype(pf,
2486 I40E_CUSTOMIZED_GTPU_IPV4);
2487 else if (filter->input.flow_ext.iip_type ==
2488 I40E_FDIR_IPTYPE_IPV6)
2489 cus_pctype = i40e_find_customized_pctype(pf,
2490 I40E_CUSTOMIZED_GTPU_IPV6);
2492 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
2493 if (filter->input.flow_ext.oip_type == I40E_FDIR_IPTYPE_IPV4)
2494 cus_pctype = i40e_find_customized_pctype(pf,
2495 I40E_CUSTOMIZED_IPV4_L2TPV3);
2496 else if (filter->input.flow_ext.oip_type ==
2497 I40E_FDIR_IPTYPE_IPV6)
2498 cus_pctype = i40e_find_customized_pctype(pf,
2499 I40E_CUSTOMIZED_IPV6_L2TPV3);
2501 case RTE_FLOW_ITEM_TYPE_ESP:
/* ESP may be carried natively or over UDP (NAT-T); pick by is_udp. */
2502 if (!filter->input.flow_ext.is_udp) {
2503 if (filter->input.flow_ext.oip_type ==
2504 I40E_FDIR_IPTYPE_IPV4)
2505 cus_pctype = i40e_find_customized_pctype(pf,
2506 I40E_CUSTOMIZED_ESP_IPV4);
2507 else if (filter->input.flow_ext.oip_type ==
2508 I40E_FDIR_IPTYPE_IPV6)
2509 cus_pctype = i40e_find_customized_pctype(pf,
2510 I40E_CUSTOMIZED_ESP_IPV6);
2512 if (filter->input.flow_ext.oip_type ==
2513 I40E_FDIR_IPTYPE_IPV4)
2514 cus_pctype = i40e_find_customized_pctype(pf,
2515 I40E_CUSTOMIZED_ESP_IPV4_UDP);
2516 else if (filter->input.flow_ext.oip_type ==
2517 I40E_FDIR_IPTYPE_IPV6)
2518 cus_pctype = i40e_find_customized_pctype(pf,
2519 I40E_CUSTOMIZED_ESP_IPV6_UDP);
/* Reset the UDP flag so it does not leak into later lookups. */
2520 filter->input.flow_ext.is_udp = false;
2524 PMD_DRV_LOG(ERR, "Unsupported item type");
2528 if (cus_pctype && cus_pctype->valid)
2529 return cus_pctype->pctype;
2531 return I40E_FILTER_PCTYPE_INVALID;
/* Store the ESP SPI from the pattern spec into the flow-union member that
 * matches the recorded outer IP type (v4/v6) and transport (native/UDP).
 */
2535 i40e_flow_set_filter_spi(struct i40e_fdir_filter_conf *filter,
2536 const struct rte_flow_item_esp *esp_spec)
2538 if (filter->input.flow_ext.oip_type ==
2539 I40E_FDIR_IPTYPE_IPV4) {
2540 if (filter->input.flow_ext.is_udp)
2541 filter->input.flow.esp_ipv4_udp_flow.spi =
2544 filter->input.flow.esp_ipv4_flow.spi =
2547 if (filter->input.flow_ext.oip_type ==
2548 I40E_FDIR_IPTYPE_IPV6) {
2549 if (filter->input.flow_ext.is_udp)
2550 filter->input.flow.esp_ipv6_udp_flow.spi =
2553 filter->input.flow.esp_ipv6_flow.spi =
2558 /* 1. Last in item should be NULL as range is not supported.
2559 * 2. Supported patterns: refer to array i40e_supported_patterns.
2560 * 3. Default supported flow type and input set: refer to array
2561 * valid_fdir_inset_table in i40e_ethdev.c.
2562 * 4. Mask of fields which need to be matched should be
2564 * 5. Mask of fields which needn't be matched should be
2566 * 6. GTP profile supports GTPv1 only.
2567 * 7. GTP-C response message ('source_port' = 2123) is not supported.
/* Parse the pattern of a flow-director rule into an FDIR filter config.
 * Walks the items, accumulating the input set, the pctype, the flex payload
 * layer, and (for customized protocols: GTP/ESP/L2TPv3) the customized
 * pctype selector; then programs inset/flex-pit/flex-mask for standard
 * pctypes. Constraints are listed in the header comment above.
 * Fixes vs. the previous revision: restored "&eth_mask" address-of
 * expressions mangled by HTML-entity encoding, and corrected the SCTP
 * branch's error text, which wrongly said "UDP".
 */
2570 i40e_flow_parse_fdir_pattern(struct rte_eth_dev *dev,
2571 const struct rte_flow_attr *attr,
2572 const struct rte_flow_item *pattern,
2573 struct rte_flow_error *error,
2574 struct i40e_fdir_filter_conf *filter)
2576 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
2577 const struct rte_flow_item *item = pattern;
2578 const struct rte_flow_item_eth *eth_spec, *eth_mask;
2579 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
2580 const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
2581 const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
2582 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
2583 const struct rte_flow_item_udp *udp_spec, *udp_mask;
2584 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
2585 const struct rte_flow_item_gtp *gtp_spec, *gtp_mask;
2586 const struct rte_flow_item_esp *esp_spec, *esp_mask;
2587 const struct rte_flow_item_raw *raw_spec, *raw_mask;
2588 const struct rte_flow_item_vf *vf_spec;
2589 const struct rte_flow_item_l2tpv3oip *l2tpv3oip_spec, *l2tpv3oip_mask;
2592 uint64_t input_set = I40E_INSET_NONE;
2594 enum rte_flow_item_type item_type;
2595 enum rte_flow_item_type next_type;
2596 enum rte_flow_item_type l3 = RTE_FLOW_ITEM_TYPE_END;
2597 enum rte_flow_item_type cus_proto = RTE_FLOW_ITEM_TYPE_END;
2599 uint8_t ipv6_addr_mask[16] = {
2600 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
2601 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
2602 enum i40e_flxpld_layer_idx layer_idx = I40E_FLXPLD_L2_IDX;
2604 int32_t off_arr[I40E_MAX_FLXPLD_FIED];
2605 uint16_t len_arr[I40E_MAX_FLXPLD_FIED];
2606 struct i40e_fdir_flex_pit flex_pit;
2607 uint8_t next_dst_off = 0;
2608 uint8_t flex_mask[I40E_FDIR_MAX_FLEX_LEN];
2610 bool cfg_flex_pit = true;
2611 bool cfg_flex_msk = true;
2612 uint16_t outer_tpid;
2613 uint16_t ether_type;
2614 uint32_t vtc_flow_cpu;
2615 bool outer_ip = true;
2618 memset(off_arr, 0, sizeof(off_arr));
2619 memset(len_arr, 0, sizeof(len_arr));
2620 memset(flex_mask, 0, I40E_FDIR_MAX_FLEX_LEN);
2621 outer_tpid = i40e_get_outer_vlan(dev);
2622 filter->input.flow_ext.customized_pctype = false;
2623 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
/* Range matching ('last') is not supported. */
2625 rte_flow_error_set(error, EINVAL,
2626 RTE_FLOW_ERROR_TYPE_ITEM,
2628 "Not support range");
2631 item_type = item->type;
2632 switch (item_type) {
2633 case RTE_FLOW_ITEM_TYPE_ETH:
2634 eth_spec = item->spec;
2635 eth_mask = item->mask;
2636 next_type = (item + 1)->type;
/* A lone ETH item (no following item) must carry spec+mask. */
2638 if (next_type == RTE_FLOW_ITEM_TYPE_END &&
2639 (!eth_spec || !eth_mask)) {
2640 rte_flow_error_set(error, EINVAL,
2641 RTE_FLOW_ERROR_TYPE_ITEM,
2643 "NULL eth spec/mask.");
2647 if (eth_spec && eth_mask) {
/* MAC masks must be all-ones or all-zero, in any combination. */
2648 if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2649 rte_is_zero_ether_addr(&eth_mask->src)) {
2650 filter->input.flow.l2_flow.dst =
2652 input_set |= I40E_INSET_DMAC;
2653 } else if (rte_is_zero_ether_addr(&eth_mask->dst) &&
2654 rte_is_broadcast_ether_addr(&eth_mask->src)) {
2655 filter->input.flow.l2_flow.src =
2657 input_set |= I40E_INSET_SMAC;
2658 } else if (rte_is_broadcast_ether_addr(&eth_mask->dst) &&
2659 rte_is_broadcast_ether_addr(&eth_mask->src)) {
2660 filter->input.flow.l2_flow.dst =
2662 filter->input.flow.l2_flow.src =
2664 input_set |= (I40E_INSET_DMAC | I40E_INSET_SMAC);
2665 } else if (!rte_is_zero_ether_addr(&eth_mask->src) ||
2666 !rte_is_zero_ether_addr(&eth_mask->dst)) {
2667 rte_flow_error_set(error, EINVAL,
2668 RTE_FLOW_ERROR_TYPE_ITEM,
2670 "Invalid MAC_addr mask.");
2674 if (eth_spec && eth_mask &&
2675 next_type == RTE_FLOW_ITEM_TYPE_END) {
2676 if (eth_mask->type != RTE_BE16(0xffff)) {
2677 rte_flow_error_set(error, EINVAL,
2678 RTE_FLOW_ERROR_TYPE_ITEM,
2680 "Invalid type mask.");
2684 ether_type = rte_be_to_cpu_16(eth_spec->type);
/* Reject ethertypes that are matched by other dedicated rules. */
2686 if (next_type == RTE_FLOW_ITEM_TYPE_VLAN ||
2687 ether_type == RTE_ETHER_TYPE_IPV4 ||
2688 ether_type == RTE_ETHER_TYPE_IPV6 ||
2689 ether_type == outer_tpid) {
2690 rte_flow_error_set(error, EINVAL,
2691 RTE_FLOW_ERROR_TYPE_ITEM,
2693 "Unsupported ether_type.");
2696 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2697 filter->input.flow.l2_flow.ether_type =
2701 pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2702 layer_idx = I40E_FLXPLD_L2_IDX;
2705 case RTE_FLOW_ITEM_TYPE_VLAN:
2706 vlan_spec = item->spec;
2707 vlan_mask = item->mask;
/* Ethertype and VLAN insets must not be combined. */
2709 RTE_ASSERT(!(input_set & I40E_INSET_LAST_ETHER_TYPE));
2710 if (vlan_spec && vlan_mask) {
2711 if (vlan_mask->tci ==
2712 rte_cpu_to_be_16(I40E_TCI_MASK)) {
2713 input_set |= I40E_INSET_VLAN_INNER;
2714 filter->input.flow_ext.vlan_tci =
2718 if (vlan_spec && vlan_mask && vlan_mask->inner_type) {
2719 if (vlan_mask->inner_type != RTE_BE16(0xffff)) {
2720 rte_flow_error_set(error, EINVAL,
2721 RTE_FLOW_ERROR_TYPE_ITEM,
2723 "Invalid inner_type"
2729 rte_be_to_cpu_16(vlan_spec->inner_type);
2731 if (ether_type == RTE_ETHER_TYPE_IPV4 ||
2732 ether_type == RTE_ETHER_TYPE_IPV6 ||
2733 ether_type == outer_tpid) {
2734 rte_flow_error_set(error, EINVAL,
2735 RTE_FLOW_ERROR_TYPE_ITEM,
2737 "Unsupported inner_type.");
2740 input_set |= I40E_INSET_LAST_ETHER_TYPE;
2741 filter->input.flow.l2_flow.ether_type =
2742 vlan_spec->inner_type;
2745 pctype = I40E_FILTER_PCTYPE_L2_PAYLOAD;
2746 layer_idx = I40E_FLXPLD_L2_IDX;
2749 case RTE_FLOW_ITEM_TYPE_IPV4:
2750 l3 = RTE_FLOW_ITEM_TYPE_IPV4;
2751 ipv4_spec = item->spec;
2752 ipv4_mask = item->mask;
2753 pctype = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
2754 layer_idx = I40E_FLXPLD_L3_IDX;
2756 if (ipv4_spec && ipv4_mask && outer_ip) {
2757 /* Check IPv4 mask and update input set */
2758 if (ipv4_mask->hdr.version_ihl ||
2759 ipv4_mask->hdr.total_length ||
2760 ipv4_mask->hdr.packet_id ||
2761 ipv4_mask->hdr.fragment_offset ||
2762 ipv4_mask->hdr.hdr_checksum) {
2763 rte_flow_error_set(error, EINVAL,
2764 RTE_FLOW_ERROR_TYPE_ITEM,
2766 "Invalid IPv4 mask.");
2770 if (ipv4_mask->hdr.src_addr == UINT32_MAX)
2771 input_set |= I40E_INSET_IPV4_SRC;
2772 if (ipv4_mask->hdr.dst_addr == UINT32_MAX)
2773 input_set |= I40E_INSET_IPV4_DST;
2774 if (ipv4_mask->hdr.type_of_service == UINT8_MAX)
2775 input_set |= I40E_INSET_IPV4_TOS;
2776 if (ipv4_mask->hdr.time_to_live == UINT8_MAX)
2777 input_set |= I40E_INSET_IPV4_TTL;
2778 if (ipv4_mask->hdr.next_proto_id == UINT8_MAX)
2779 input_set |= I40E_INSET_IPV4_PROTO;
2781 /* Check if it is fragment. */
2782 frag_off = ipv4_spec->hdr.fragment_offset;
2783 frag_off = rte_be_to_cpu_16(frag_off);
2784 if (frag_off & RTE_IPV4_HDR_OFFSET_MASK ||
2785 frag_off & RTE_IPV4_HDR_MF_FLAG)
2786 pctype = I40E_FILTER_PCTYPE_FRAG_IPV4;
2788 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2789 if (input_set & (I40E_INSET_IPV4_SRC |
2790 I40E_INSET_IPV4_DST | I40E_INSET_IPV4_TOS |
2791 I40E_INSET_IPV4_TTL | I40E_INSET_IPV4_PROTO)) {
2792 rte_flow_error_set(error, EINVAL,
2793 RTE_FLOW_ERROR_TYPE_ITEM,
2795 "L2 and L3 input set are exclusive.");
2799 /* Get the filter info */
2800 filter->input.flow.ip4_flow.proto =
2801 ipv4_spec->hdr.next_proto_id;
2802 filter->input.flow.ip4_flow.tos =
2803 ipv4_spec->hdr.type_of_service;
2804 filter->input.flow.ip4_flow.ttl =
2805 ipv4_spec->hdr.time_to_live;
2806 filter->input.flow.ip4_flow.src_ip =
2807 ipv4_spec->hdr.src_addr;
2808 filter->input.flow.ip4_flow.dst_ip =
2809 ipv4_spec->hdr.dst_addr;
2811 filter->input.flow_ext.inner_ip = false;
2812 filter->input.flow_ext.oip_type =
2813 I40E_FDIR_IPTYPE_IPV4;
2815 } else if (!ipv4_spec && !ipv4_mask && !outer_ip) {
/* Bare inner IPv4 item: only records the inner IP type. */
2816 filter->input.flow_ext.inner_ip = true;
2817 filter->input.flow_ext.iip_type =
2818 I40E_FDIR_IPTYPE_IPV4;
2819 } else if (!ipv4_spec && !ipv4_mask && outer_ip) {
2820 filter->input.flow_ext.inner_ip = false;
2821 filter->input.flow_ext.oip_type =
2822 I40E_FDIR_IPTYPE_IPV4;
2823 } else if ((ipv4_spec || ipv4_mask) && !outer_ip) {
2824 rte_flow_error_set(error, EINVAL,
2825 RTE_FLOW_ERROR_TYPE_ITEM,
2827 "Invalid inner IPv4 mask.");
2835 case RTE_FLOW_ITEM_TYPE_IPV6:
2836 l3 = RTE_FLOW_ITEM_TYPE_IPV6;
2837 ipv6_spec = item->spec;
2838 ipv6_mask = item->mask;
2839 pctype = I40E_FILTER_PCTYPE_NONF_IPV6_OTHER;
2840 layer_idx = I40E_FLXPLD_L3_IDX;
2842 if (ipv6_spec && ipv6_mask && outer_ip) {
2843 /* Check IPv6 mask and update input set */
2844 if (ipv6_mask->hdr.payload_len) {
2845 rte_flow_error_set(error, EINVAL,
2846 RTE_FLOW_ERROR_TYPE_ITEM,
2848 "Invalid IPv6 mask");
2852 if (!memcmp(ipv6_mask->hdr.src_addr,
2854 RTE_DIM(ipv6_mask->hdr.src_addr)))
2855 input_set |= I40E_INSET_IPV6_SRC;
2856 if (!memcmp(ipv6_mask->hdr.dst_addr,
2858 RTE_DIM(ipv6_mask->hdr.dst_addr)))
2859 input_set |= I40E_INSET_IPV6_DST;
2861 if ((ipv6_mask->hdr.vtc_flow &
2862 rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2863 == rte_cpu_to_be_32(I40E_IPV6_TC_MASK))
2864 input_set |= I40E_INSET_IPV6_TC;
2865 if (ipv6_mask->hdr.proto == UINT8_MAX)
2866 input_set |= I40E_INSET_IPV6_NEXT_HDR;
2867 if (ipv6_mask->hdr.hop_limits == UINT8_MAX)
2868 input_set |= I40E_INSET_IPV6_HOP_LIMIT;
2870 /* Get filter info */
2872 rte_be_to_cpu_32(ipv6_spec->hdr.vtc_flow);
2873 filter->input.flow.ipv6_flow.tc =
2874 (uint8_t)(vtc_flow_cpu >>
2875 I40E_FDIR_IPv6_TC_OFFSET);
2876 filter->input.flow.ipv6_flow.proto =
2877 ipv6_spec->hdr.proto;
2878 filter->input.flow.ipv6_flow.hop_limits =
2879 ipv6_spec->hdr.hop_limits;
2881 filter->input.flow_ext.inner_ip = false;
2882 filter->input.flow_ext.oip_type =
2883 I40E_FDIR_IPTYPE_IPV6;
2885 rte_memcpy(filter->input.flow.ipv6_flow.src_ip,
2886 ipv6_spec->hdr.src_addr, 16);
2887 rte_memcpy(filter->input.flow.ipv6_flow.dst_ip,
2888 ipv6_spec->hdr.dst_addr, 16);
2890 /* Check if it is fragment. */
2891 if (ipv6_spec->hdr.proto ==
2892 I40E_IPV6_FRAG_HEADER)
2893 pctype = I40E_FILTER_PCTYPE_FRAG_IPV6;
2894 } else if (!ipv6_spec && !ipv6_mask && !outer_ip) {
2895 filter->input.flow_ext.inner_ip = true;
2896 filter->input.flow_ext.iip_type =
2897 I40E_FDIR_IPTYPE_IPV6;
2898 } else if (!ipv6_spec && !ipv6_mask && outer_ip) {
2899 filter->input.flow_ext.inner_ip = false;
2900 filter->input.flow_ext.oip_type =
2901 I40E_FDIR_IPTYPE_IPV6;
2902 } else if ((ipv6_spec || ipv6_mask) && !outer_ip) {
2903 rte_flow_error_set(error, EINVAL,
2904 RTE_FLOW_ERROR_TYPE_ITEM,
2906 "Invalid inner IPv6 mask");
2913 case RTE_FLOW_ITEM_TYPE_TCP:
2914 tcp_spec = item->spec;
2915 tcp_mask = item->mask;
2917 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2919 I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
2920 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2922 I40E_FILTER_PCTYPE_NONF_IPV6_TCP;
2923 if (tcp_spec && tcp_mask) {
2924 /* Check TCP mask and update input set */
2925 if (tcp_mask->hdr.sent_seq ||
2926 tcp_mask->hdr.recv_ack ||
2927 tcp_mask->hdr.data_off ||
2928 tcp_mask->hdr.tcp_flags ||
2929 tcp_mask->hdr.rx_win ||
2930 tcp_mask->hdr.cksum ||
2931 tcp_mask->hdr.tcp_urp) {
2932 rte_flow_error_set(error, EINVAL,
2933 RTE_FLOW_ERROR_TYPE_ITEM,
2935 "Invalid TCP mask");
2939 if (tcp_mask->hdr.src_port == UINT16_MAX)
2940 input_set |= I40E_INSET_SRC_PORT;
2941 if (tcp_mask->hdr.dst_port == UINT16_MAX)
2942 input_set |= I40E_INSET_DST_PORT;
2944 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
2946 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
2947 rte_flow_error_set(error, EINVAL,
2948 RTE_FLOW_ERROR_TYPE_ITEM,
2950 "L2 and L4 input set are exclusive.");
2954 /* Get filter info */
2955 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
2956 filter->input.flow.tcp4_flow.src_port =
2957 tcp_spec->hdr.src_port;
2958 filter->input.flow.tcp4_flow.dst_port =
2959 tcp_spec->hdr.dst_port;
2960 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
2961 filter->input.flow.tcp6_flow.src_port =
2962 tcp_spec->hdr.src_port;
2963 filter->input.flow.tcp6_flow.dst_port =
2964 tcp_spec->hdr.dst_port;
2969 layer_idx = I40E_FLXPLD_L4_IDX;
2972 case RTE_FLOW_ITEM_TYPE_UDP:
2973 udp_spec = item->spec;
2974 udp_mask = item->mask;
2976 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
2978 I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
2979 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
2981 I40E_FILTER_PCTYPE_NONF_IPV6_UDP;
2983 if (udp_spec && udp_mask) {
2984 /* Check UDP mask and update input set*/
2985 if (udp_mask->hdr.dgram_len ||
2986 udp_mask->hdr.dgram_cksum) {
2987 rte_flow_error_set(error, EINVAL,
2988 RTE_FLOW_ERROR_TYPE_ITEM,
2990 "Invalid UDP mask");
2994 if (udp_mask->hdr.src_port == UINT16_MAX)
2995 input_set |= I40E_INSET_SRC_PORT;
2996 if (udp_mask->hdr.dst_port == UINT16_MAX)
2997 input_set |= I40E_INSET_DST_PORT;
2999 if (input_set & (I40E_INSET_DMAC | I40E_INSET_SMAC)) {
3001 (I40E_INSET_SRC_PORT | I40E_INSET_DST_PORT)) {
3002 rte_flow_error_set(error, EINVAL,
3003 RTE_FLOW_ERROR_TYPE_ITEM,
3005 "L2 and L4 input set are exclusive.");
3009 /* Get filter info */
3010 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3011 filter->input.flow.udp4_flow.src_port =
3012 udp_spec->hdr.src_port;
3013 filter->input.flow.udp4_flow.dst_port =
3014 udp_spec->hdr.dst_port;
3015 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3016 filter->input.flow.udp6_flow.src_port =
3017 udp_spec->hdr.src_port;
3018 filter->input.flow.udp6_flow.dst_port =
3019 udp_spec->hdr.dst_port;
/* Flag UDP transport for a possible following ESP item (NAT-T). */
3023 filter->input.flow_ext.is_udp = true;
3024 layer_idx = I40E_FLXPLD_L4_IDX;
3027 case RTE_FLOW_ITEM_TYPE_GTPC:
3028 case RTE_FLOW_ITEM_TYPE_GTPU:
3029 if (!pf->gtp_support) {
3030 rte_flow_error_set(error, EINVAL,
3031 RTE_FLOW_ERROR_TYPE_ITEM,
3033 "Unsupported protocol");
3037 gtp_spec = item->spec;
3038 gtp_mask = item->mask;
3040 if (gtp_spec && gtp_mask) {
/* Only an exact TEID match is supported. */
3041 if (gtp_mask->v_pt_rsv_flags ||
3042 gtp_mask->msg_type ||
3043 gtp_mask->msg_len ||
3044 gtp_mask->teid != UINT32_MAX) {
3045 rte_flow_error_set(error, EINVAL,
3046 RTE_FLOW_ERROR_TYPE_ITEM,
3048 "Invalid GTP mask");
3052 filter->input.flow.gtp_flow.teid =
3054 filter->input.flow_ext.customized_pctype = true;
3055 cus_proto = item_type;
3058 case RTE_FLOW_ITEM_TYPE_ESP:
3059 if (!pf->esp_support) {
3060 rte_flow_error_set(error, EINVAL,
3061 RTE_FLOW_ERROR_TYPE_ITEM,
3063 "Unsupported ESP protocol");
3067 esp_spec = item->spec;
3068 esp_mask = item->mask;
3070 if (!esp_spec || !esp_mask) {
3071 rte_flow_error_set(error, EINVAL,
3072 RTE_FLOW_ERROR_TYPE_ITEM,
3074 "Invalid ESP item");
3078 if (esp_spec && esp_mask) {
/* Only an exact SPI match is supported. */
3079 if (esp_mask->hdr.spi != UINT32_MAX) {
3080 rte_flow_error_set(error, EINVAL,
3081 RTE_FLOW_ERROR_TYPE_ITEM,
3083 "Invalid ESP mask");
3086 i40e_flow_set_filter_spi(filter, esp_spec);
3087 filter->input.flow_ext.customized_pctype = true;
3088 cus_proto = item_type;
3091 case RTE_FLOW_ITEM_TYPE_SCTP:
3092 sctp_spec = item->spec;
3093 sctp_mask = item->mask;
3095 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4)
3097 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP;
3098 else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6)
3100 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP;
3102 if (sctp_spec && sctp_mask) {
3103 /* Check SCTP mask and update input set */
3104 if (sctp_mask->hdr.cksum) {
3105 rte_flow_error_set(error, EINVAL,
3106 RTE_FLOW_ERROR_TYPE_ITEM,
3108 "Invalid SCTP mask");
3112 if (sctp_mask->hdr.src_port == UINT16_MAX)
3113 input_set |= I40E_INSET_SRC_PORT;
3114 if (sctp_mask->hdr.dst_port == UINT16_MAX)
3115 input_set |= I40E_INSET_DST_PORT;
3116 if (sctp_mask->hdr.tag == UINT32_MAX)
3117 input_set |= I40E_INSET_SCTP_VT;
3119 /* Get filter info */
3120 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3121 filter->input.flow.sctp4_flow.src_port =
3122 sctp_spec->hdr.src_port;
3123 filter->input.flow.sctp4_flow.dst_port =
3124 sctp_spec->hdr.dst_port;
3125 filter->input.flow.sctp4_flow.verify_tag
3126 = sctp_spec->hdr.tag;
3127 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3128 filter->input.flow.sctp6_flow.src_port =
3129 sctp_spec->hdr.src_port;
3130 filter->input.flow.sctp6_flow.dst_port =
3131 sctp_spec->hdr.dst_port;
3132 filter->input.flow.sctp6_flow.verify_tag
3133 = sctp_spec->hdr.tag;
3137 layer_idx = I40E_FLXPLD_L4_IDX;
3140 case RTE_FLOW_ITEM_TYPE_RAW:
3141 raw_spec = item->spec;
3142 raw_mask = item->mask;
3144 if (!raw_spec || !raw_mask) {
3145 rte_flow_error_set(error, EINVAL,
3146 RTE_FLOW_ERROR_TYPE_ITEM,
3148 "NULL RAW spec/mask");
/* Flex payload requires writing global registers — not allowed
 * when sharing the device with other drivers. */
3152 if (pf->support_multi_driver) {
3153 rte_flow_error_set(error, ENOTSUP,
3154 RTE_FLOW_ERROR_TYPE_ITEM,
3156 "Unsupported flexible payload.");
3160 ret = i40e_flow_check_raw_item(item, raw_spec, error);
3164 off_arr[raw_id] = raw_spec->offset;
3165 len_arr[raw_id] = raw_spec->length;
3168 memset(&flex_pit, 0, sizeof(struct i40e_fdir_flex_pit));
3170 raw_spec->length / sizeof(uint16_t);
3171 flex_pit.dst_offset =
3172 next_dst_off / sizeof(uint16_t);
/* Accumulate word offsets of all RAW items seen so far. */
3174 for (i = 0; i <= raw_id; i++) {
3176 flex_pit.src_offset +=
3180 flex_pit.src_offset +=
3181 (off_arr[i] + len_arr[i]) /
3183 flex_size += len_arr[i];
3185 if (((flex_pit.src_offset + flex_pit.size) >=
3186 I40E_MAX_FLX_SOURCE_OFF / sizeof(uint16_t)) ||
3187 flex_size > I40E_FDIR_MAX_FLEXLEN) {
3188 rte_flow_error_set(error, EINVAL,
3189 RTE_FLOW_ERROR_TYPE_ITEM,
3191 "Exceeds maxmial payload limit.");
3195 /* Store flex pit to SW */
3196 ret = i40e_flow_store_flex_pit(pf, &flex_pit,
3199 rte_flow_error_set(error, EINVAL,
3200 RTE_FLOW_ERROR_TYPE_ITEM,
3202 "Conflict with the first flexible rule.");
3205 cfg_flex_pit = false;
3207 for (i = 0; i < raw_spec->length; i++) {
3208 j = i + next_dst_off;
3209 filter->input.flow_ext.flexbytes[j] =
3210 raw_spec->pattern[i];
3211 flex_mask[j] = raw_mask->pattern[i];
3214 next_dst_off += raw_spec->length;
3217 case RTE_FLOW_ITEM_TYPE_VF:
3218 vf_spec = item->spec;
3219 if (!attr->transfer) {
3220 rte_flow_error_set(error, ENOTSUP,
3221 RTE_FLOW_ERROR_TYPE_ITEM,
3223 "Matching VF traffic"
3224 " without affecting it"
3225 " (transfer attribute)"
3229 filter->input.flow_ext.is_vf = 1;
3230 filter->input.flow_ext.dst_id = vf_spec->id;
3231 if (filter->input.flow_ext.is_vf &&
3232 filter->input.flow_ext.dst_id >= pf->vf_num) {
3233 rte_flow_error_set(error, EINVAL,
3234 RTE_FLOW_ERROR_TYPE_ITEM,
3236 "Invalid VF ID for FDIR.");
3240 case RTE_FLOW_ITEM_TYPE_L2TPV3OIP:
3241 l2tpv3oip_spec = item->spec;
3242 l2tpv3oip_mask = item->mask;
3244 if (!l2tpv3oip_spec || !l2tpv3oip_mask)
/* Only an exact session-ID match is supported. */
3247 if (l2tpv3oip_mask->session_id != UINT32_MAX) {
3248 rte_flow_error_set(error, EINVAL,
3249 RTE_FLOW_ERROR_TYPE_ITEM,
3251 "Invalid L2TPv3 mask");
3255 if (l3 == RTE_FLOW_ITEM_TYPE_IPV4) {
3256 filter->input.flow.ip4_l2tpv3oip_flow.session_id =
3257 l2tpv3oip_spec->session_id;
3258 filter->input.flow_ext.oip_type =
3259 I40E_FDIR_IPTYPE_IPV4;
3260 } else if (l3 == RTE_FLOW_ITEM_TYPE_IPV6) {
3261 filter->input.flow.ip6_l2tpv3oip_flow.session_id =
3262 l2tpv3oip_spec->session_id;
3263 filter->input.flow_ext.oip_type =
3264 I40E_FDIR_IPTYPE_IPV6;
3267 filter->input.flow_ext.customized_pctype = true;
3268 cus_proto = item_type;
3275 /* Get customized pctype value */
3276 if (filter->input.flow_ext.customized_pctype) {
3277 pctype = i40e_flow_fdir_get_pctype_value(pf, cus_proto, filter);
3278 if (pctype == I40E_FILTER_PCTYPE_INVALID) {
3279 rte_flow_error_set(error, EINVAL,
3280 RTE_FLOW_ERROR_TYPE_ITEM,
3282 "Unsupported pctype");
3287 /* If customized pctype is not used, set fdir configuration.*/
3288 if (!filter->input.flow_ext.customized_pctype) {
3289 ret = i40e_flow_set_fdir_inset(pf, pctype, input_set);
3291 rte_flow_error_set(error, EINVAL,
3292 RTE_FLOW_ERROR_TYPE_ITEM, item,
3293 "Conflict with the first rule's input set.");
3295 } else if (ret == -EINVAL) {
3296 rte_flow_error_set(error, EINVAL,
3297 RTE_FLOW_ERROR_TYPE_ITEM, item,
3298 "Invalid pattern mask.");
3302 /* Store flex mask to SW */
3303 ret = i40e_flow_store_flex_mask(pf, pctype, flex_mask);
3305 rte_flow_error_set(error, EINVAL,
3306 RTE_FLOW_ERROR_TYPE_ITEM,
3308 "Exceed maximal number of bitmasks");
3310 } else if (ret == -2) {
3311 rte_flow_error_set(error, EINVAL,
3312 RTE_FLOW_ERROR_TYPE_ITEM,
3314 "Conflict with the first flexible rule");
3317 cfg_flex_msk = false;
3320 i40e_flow_set_fdir_flex_pit(pf, layer_idx, raw_id);
3323 i40e_flow_set_fdir_flex_msk(pf, pctype);
3326 filter->input.pctype = pctype;
3331 /* Parse to get the action info of a FDIR filter.
3332 * FDIR action supports QUEUE or (QUEUE + MARK).
/* NOTE(review): this listing is elided (error-return paths, `break`s and
 * closing braces are missing between visible lines) -- verify against the
 * full source before relying on control flow shown here.
 */
3335 i40e_flow_parse_fdir_action(struct rte_eth_dev *dev,
3336 const struct rte_flow_action *actions,
3337 struct rte_flow_error *error,
3338 struct i40e_fdir_filter_conf *filter)
3340 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3341 const struct rte_flow_action *act;
3342 const struct rte_flow_action_queue *act_q;
3343 const struct rte_flow_action_mark *mark_spec = NULL;
3346 /* Check if the first non-void action is QUEUE or DROP or PASSTHRU. */
3347 NEXT_ITEM_OF_ACTION(act, actions, index);
3348 switch (act->type) {
3349 case RTE_FLOW_ACTION_TYPE_QUEUE:
3351 filter->action.rx_queue = act_q->index;
/* Queue index is bounded by the PF's Rx queue count, or by the
 * per-VF queue count when the rule targets a VF.
 */
3352 if ((!filter->input.flow_ext.is_vf &&
3353 filter->action.rx_queue >= pf->dev_data->nb_rx_queues) ||
3354 (filter->input.flow_ext.is_vf &&
3355 filter->action.rx_queue >= pf->vf_nb_qps)) {
3356 rte_flow_error_set(error, EINVAL,
3357 RTE_FLOW_ERROR_TYPE_ACTION, act,
3358 "Invalid queue ID for FDIR.");
3361 filter->action.behavior = I40E_FDIR_ACCEPT;
3363 case RTE_FLOW_ACTION_TYPE_DROP:
3364 filter->action.behavior = I40E_FDIR_REJECT;
3366 case RTE_FLOW_ACTION_TYPE_PASSTHRU:
3367 filter->action.behavior = I40E_FDIR_PASSTHRU;
3369 case RTE_FLOW_ACTION_TYPE_MARK:
/* A leading MARK implies passthru behavior plus FD_ID reporting. */
3370 filter->action.behavior = I40E_FDIR_PASSTHRU;
3371 mark_spec = act->conf;
3372 filter->action.report_status = I40E_FDIR_REPORT_ID;
3373 filter->soft_id = mark_spec->id;
3376 rte_flow_error_set(error, EINVAL,
3377 RTE_FLOW_ERROR_TYPE_ACTION, act,
3382 /* Check if the next non-void item is MARK or FLAG or END. */
3384 NEXT_ITEM_OF_ACTION(act, actions, index);
3385 switch (act->type) {
3386 case RTE_FLOW_ACTION_TYPE_MARK:
3388 /* Double MARK actions requested */
3389 rte_flow_error_set(error, EINVAL,
3390 RTE_FLOW_ERROR_TYPE_ACTION, act,
3394 mark_spec = act->conf;
3395 filter->action.report_status = I40E_FDIR_REPORT_ID;
3396 filter->soft_id = mark_spec->id;
3398 case RTE_FLOW_ACTION_TYPE_FLAG:
3400 /* MARK + FLAG not supported */
3401 rte_flow_error_set(error, EINVAL,
3402 RTE_FLOW_ERROR_TYPE_ACTION, act,
3406 filter->action.report_status = I40E_FDIR_NO_REPORT_STATUS;
3408 case RTE_FLOW_ACTION_TYPE_RSS:
3409 if (filter->action.behavior != I40E_FDIR_PASSTHRU) {
3410 /* RSS filter won't be next if FDIR did not pass thru */
3411 rte_flow_error_set(error, EINVAL,
3412 RTE_FLOW_ERROR_TYPE_ACTION, act,
3417 case RTE_FLOW_ACTION_TYPE_END:
3420 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3421 act, "Invalid action.");
3425 /* Check if the next non-void item is END */
3427 NEXT_ITEM_OF_ACTION(act, actions, index);
3428 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3429 rte_flow_error_set(error, EINVAL,
3430 RTE_FLOW_ERROR_TYPE_ACTION,
3431 act, "Invalid action.")'
/* Top-level FDIR parser: validates pattern, actions and attributes, then
 * lazily brings up the FDIR VSI on the first rule and enables Rx FDIR
 * checking when the rule list was previously empty.
 * NOTE(review): elided listing -- early-return error paths and the teardown
 * label wiring are missing from view; verify against the full source.
 */
3439 i40e_flow_parse_fdir_filter(struct rte_eth_dev *dev,
3440 const struct rte_flow_attr *attr,
3441 const struct rte_flow_item pattern[],
3442 const struct rte_flow_action actions[],
3443 struct rte_flow_error *error,
3444 union i40e_filter_t *filter)
3446 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3447 struct i40e_fdir_filter_conf *fdir_filter =
3448 &filter->fdir_filter;
3451 ret = i40e_flow_parse_fdir_pattern(dev, attr, pattern, error,
3456 ret = i40e_flow_parse_fdir_action(dev, actions, error, fdir_filter);
3460 ret = i40e_flow_parse_attr(attr, error);
3464 cons_filter_type = RTE_ETH_FILTER_FDIR;
3466 if (pf->fdir.fdir_vsi == NULL) {
3467 /* Enable fdir when fdir flow is added at first time. */
3468 ret = i40e_fdir_setup(pf);
3469 if (ret != I40E_SUCCESS) {
3470 rte_flow_error_set(error, ENOTSUP,
3471 RTE_FLOW_ERROR_TYPE_HANDLE,
3472 NULL, "Failed to setup fdir.");
3475 ret = i40e_fdir_configure(dev);
3477 rte_flow_error_set(error, ENOTSUP,
3478 RTE_FLOW_ERROR_TYPE_HANDLE,
3479 NULL, "Failed to configure fdir.");
3484 /* If create the first fdir rule, enable fdir check for rx queues */
3485 if (TAILQ_EMPTY(&pf->fdir.fdir_list))
3486 i40e_fdir_rx_proc_enable(dev, 1);
/* On configure failure the FDIR resources set up above are released. */
3490 i40e_fdir_teardown(pf);
3494 /* Parse to get the action info of a tunnel filter
3495 * Tunnel action only supports PF, VF and QUEUE.
/* Expected action sequence: PF|VF [QUEUE] END. VF destinations carry a
 * VF id validated against pf->vf_num; queue ids are validated against the
 * PF's Rx queue count or the per-VF queue count.
 * NOTE(review): elided listing -- returns/braces omitted between lines.
 */
3498 i40e_flow_parse_tunnel_action(struct rte_eth_dev *dev,
3499 const struct rte_flow_action *actions,
3500 struct rte_flow_error *error,
3501 struct i40e_tunnel_filter_conf *filter)
3503 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
3504 const struct rte_flow_action *act;
3505 const struct rte_flow_action_queue *act_q;
3506 const struct rte_flow_action_vf *act_vf;
3509 /* Check if the first non-void action is PF or VF. */
3510 NEXT_ITEM_OF_ACTION(act, actions, index);
3511 if (act->type != RTE_FLOW_ACTION_TYPE_PF &&
3512 act->type != RTE_FLOW_ACTION_TYPE_VF) {
3513 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3514 act, "Not supported action.");
3518 if (act->type == RTE_FLOW_ACTION_TYPE_VF) {
3520 filter->vf_id = act_vf->id;
3521 filter->is_to_vf = 1;
3522 if (filter->vf_id >= pf->vf_num) {
3523 rte_flow_error_set(error, EINVAL,
3524 RTE_FLOW_ERROR_TYPE_ACTION,
3525 act, "Invalid VF ID for tunnel filter");
3530 /* Check if the next non-void item is QUEUE */
3532 NEXT_ITEM_OF_ACTION(act, actions, index);
3533 if (act->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3535 filter->queue_id = act_q->index;
3536 if ((!filter->is_to_vf) &&
3537 (filter->queue_id >= pf->dev_data->nb_rx_queues)) {
3538 rte_flow_error_set(error, EINVAL,
3539 RTE_FLOW_ERROR_TYPE_ACTION,
3540 act, "Invalid queue ID for tunnel filter");
3542 } else if (filter->is_to_vf &&
3543 (filter->queue_id >= pf->vf_nb_qps)) {
3544 rte_flow_error_set(error, EINVAL,
3545 RTE_FLOW_ERROR_TYPE_ACTION,
3546 act, "Invalid queue ID for tunnel filter");
3551 /* Check if the next non-void item is END */
3553 NEXT_ITEM_OF_ACTION(act, actions, index);
3554 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
3555 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3556 act, "Not supported action.");
3563 /* 1. Last in item should be NULL as range is not supported.
3564 * 2. Supported filter types: Source port only and Destination port only.
3565 * 3. Mask of fields which need to be matched should be
3567 * 4. Mask of fields which needn't to be matched should be
/* Parses an ETH / IPv4|IPv6 / UDP|TCP|SCTP pattern into an L4 cloud
 * filter: records ip_type, a single L4 port (src XOR dst -- specifying
 * both is rejected) and the matching tunnel_type (CLOUD_TYPE_UDP/TCP/SCTP).
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
3571 i40e_flow_parse_l4_pattern(const struct rte_flow_item *pattern,
3572 struct rte_flow_error *error,
3573 struct i40e_tunnel_filter_conf *filter)
3575 const struct rte_flow_item_sctp *sctp_spec, *sctp_mask;
3576 const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
3577 const struct rte_flow_item_udp *udp_spec, *udp_mask;
3578 const struct rte_flow_item *item = pattern;
3579 enum rte_flow_item_type item_type;
3581 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3583 rte_flow_error_set(error, EINVAL,
3584 RTE_FLOW_ERROR_TYPE_ITEM,
3586 "Not support range");
3589 item_type = item->type;
3590 switch (item_type) {
3591 case RTE_FLOW_ITEM_TYPE_ETH:
/* ETH is a placeholder here: spec/mask must be absent. */
3592 if (item->spec || item->mask) {
3593 rte_flow_error_set(error, EINVAL,
3594 RTE_FLOW_ERROR_TYPE_ITEM,
3596 "Invalid ETH item");
3601 case RTE_FLOW_ITEM_TYPE_IPV4:
3602 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3603 /* IPv4 is used to describe protocol,
3604 * spec and mask should be NULL.
3606 if (item->spec || item->mask) {
3607 rte_flow_error_set(error, EINVAL,
3608 RTE_FLOW_ERROR_TYPE_ITEM,
3610 "Invalid IPv4 item");
3615 case RTE_FLOW_ITEM_TYPE_IPV6:
3616 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3617 /* IPv6 is used to describe protocol,
3618 * spec and mask should be NULL.
3620 if (item->spec || item->mask) {
3621 rte_flow_error_set(error, EINVAL,
3622 RTE_FLOW_ERROR_TYPE_ITEM,
3624 "Invalid IPv6 item");
3629 case RTE_FLOW_ITEM_TYPE_UDP:
3630 udp_spec = item->spec;
3631 udp_mask = item->mask;
3633 if (!udp_spec || !udp_mask) {
3634 rte_flow_error_set(error, EINVAL,
3635 RTE_FLOW_ERROR_TYPE_ITEM,
3637 "Invalid udp item");
/* Only one of src/dst port may be specified, not both. */
3641 if (udp_spec->hdr.src_port != 0 &&
3642 udp_spec->hdr.dst_port != 0) {
3643 rte_flow_error_set(error, EINVAL,
3644 RTE_FLOW_ERROR_TYPE_ITEM,
3646 "Invalid udp spec");
3650 if (udp_spec->hdr.src_port != 0) {
3651 filter->l4_port_type =
3652 I40E_L4_PORT_TYPE_SRC;
3654 rte_be_to_cpu_32(udp_spec->hdr.src_port);
3657 if (udp_spec->hdr.dst_port != 0) {
3658 filter->l4_port_type =
3659 I40E_L4_PORT_TYPE_DST;
3661 rte_be_to_cpu_32(udp_spec->hdr.dst_port);
3664 filter->tunnel_type = I40E_CLOUD_TYPE_UDP;
3667 case RTE_FLOW_ITEM_TYPE_TCP:
3668 tcp_spec = item->spec;
3669 tcp_mask = item->mask;
3671 if (!tcp_spec || !tcp_mask) {
3672 rte_flow_error_set(error, EINVAL,
3673 RTE_FLOW_ERROR_TYPE_ITEM,
3675 "Invalid tcp item");
3679 if (tcp_spec->hdr.src_port != 0 &&
3680 tcp_spec->hdr.dst_port != 0) {
3681 rte_flow_error_set(error, EINVAL,
3682 RTE_FLOW_ERROR_TYPE_ITEM,
3684 "Invalid tcp spec");
3688 if (tcp_spec->hdr.src_port != 0) {
3689 filter->l4_port_type =
3690 I40E_L4_PORT_TYPE_SRC;
3692 rte_be_to_cpu_32(tcp_spec->hdr.src_port);
3695 if (tcp_spec->hdr.dst_port != 0) {
3696 filter->l4_port_type =
3697 I40E_L4_PORT_TYPE_DST;
3699 rte_be_to_cpu_32(tcp_spec->hdr.dst_port);
3702 filter->tunnel_type = I40E_CLOUD_TYPE_TCP;
3705 case RTE_FLOW_ITEM_TYPE_SCTP:
3706 sctp_spec = item->spec;
3707 sctp_mask = item->mask;
3709 if (!sctp_spec || !sctp_mask) {
3710 rte_flow_error_set(error, EINVAL,
3711 RTE_FLOW_ERROR_TYPE_ITEM,
3713 "Invalid sctp item");
3717 if (sctp_spec->hdr.src_port != 0 &&
3718 sctp_spec->hdr.dst_port != 0) {
3719 rte_flow_error_set(error, EINVAL,
3720 RTE_FLOW_ERROR_TYPE_ITEM,
3722 "Invalid sctp spec");
3726 if (sctp_spec->hdr.src_port != 0) {
3727 filter->l4_port_type =
3728 I40E_L4_PORT_TYPE_SRC;
3730 rte_be_to_cpu_32(sctp_spec->hdr.src_port);
3733 if (sctp_spec->hdr.dst_port != 0) {
3734 filter->l4_port_type =
3735 I40E_L4_PORT_TYPE_DST;
3737 rte_be_to_cpu_32(sctp_spec->hdr.dst_port);
3740 filter->tunnel_type = I40E_CLOUD_TYPE_SCTP;
/* Glue for L4-port cloud filters: parse pattern, actions and attributes
 * into the consistent tunnel filter, then mark the consumed filter type.
 * NOTE(review): elided listing -- the `if (ret) return ret;` checks between
 * the parse calls are missing from view.
 */
3752 i40e_flow_parse_l4_cloud_filter(struct rte_eth_dev *dev,
3753 const struct rte_flow_attr *attr,
3754 const struct rte_flow_item pattern[],
3755 const struct rte_flow_action actions[],
3756 struct rte_flow_error *error,
3757 union i40e_filter_t *filter)
3759 struct i40e_tunnel_filter_conf *tunnel_filter =
3760 &filter->consistent_tunnel_filter;
3763 ret = i40e_flow_parse_l4_pattern(pattern, error, tunnel_filter);
3767 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
3771 ret = i40e_flow_parse_attr(attr, error);
3775 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
/* Whitelist of tunnel filter field combinations accepted by
 * i40e_check_tunnel_filter_type() (inner MAC / inner VLAN / tenant id /
 * outer MAC bit combinations).
 */
3780 static uint16_t i40e_supported_tunnel_filter_types[] = {
3781 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID |
3782 ETH_TUNNEL_FILTER_IVLAN,
3783 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_IVLAN,
3784 ETH_TUNNEL_FILTER_IMAC | ETH_TUNNEL_FILTER_TENID,
3785 ETH_TUNNEL_FILTER_OMAC | ETH_TUNNEL_FILTER_TENID |
3786 ETH_TUNNEL_FILTER_IMAC,
3787 ETH_TUNNEL_FILTER_IMAC,
/* Returns success when filter_type matches one of the supported tunnel
 * filter field combinations above.
 * NOTE(review): elided listing -- the found/not-found return values are
 * not visible here.
 */
3791 i40e_check_tunnel_filter_type(uint8_t filter_type)
3795 for (i = 0; i < RTE_DIM(i40e_supported_tunnel_filter_types); i++) {
3796 if (filter_type == i40e_supported_tunnel_filter_types[i])
3803 /* 1. Last in item should be NULL as range is not supported.
3804 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
3805 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
3806 * 3. Mask of fields which need to be matched should be
3808 * 4. Mask of fields which needn't to be matched should be
/* Builds a VXLAN tunnel filter from ETH / [VLAN] / IPv4|IPv6 / UDP /
 * VXLAN items, accumulating a filter_type bitmap that is validated
 * against i40e_supported_tunnel_filter_types at the end.
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
3812 i40e_flow_parse_vxlan_pattern(__rte_unused struct rte_eth_dev *dev,
3813 const struct rte_flow_item *pattern,
3814 struct rte_flow_error *error,
3815 struct i40e_tunnel_filter_conf *filter)
3817 const struct rte_flow_item *item = pattern;
3818 const struct rte_flow_item_eth *eth_spec;
3819 const struct rte_flow_item_eth *eth_mask;
3820 const struct rte_flow_item_vxlan *vxlan_spec;
3821 const struct rte_flow_item_vxlan *vxlan_mask;
3822 const struct rte_flow_item_vlan *vlan_spec;
3823 const struct rte_flow_item_vlan *vlan_mask;
3824 uint8_t filter_type = 0;
3825 bool is_vni_masked = 0;
3826 uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
3827 enum rte_flow_item_type item_type;
3828 bool vxlan_flag = 0;
3829 uint32_t tenant_id_be = 0;
3832 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3834 rte_flow_error_set(error, EINVAL,
3835 RTE_FLOW_ERROR_TYPE_ITEM,
3837 "Not support range");
3840 item_type = item->type;
3841 switch (item_type) {
3842 case RTE_FLOW_ITEM_TYPE_ETH:
3843 eth_spec = item->spec;
3844 eth_mask = item->mask;
3846 /* Check if ETH item is used for place holder.
3847 * If yes, both spec and mask should be NULL.
3848 * If no, both spec and mask shouldn't be NULL.
3850 if ((!eth_spec && eth_mask) ||
3851 (eth_spec && !eth_mask)) {
3852 rte_flow_error_set(error, EINVAL,
3853 RTE_FLOW_ERROR_TYPE_ITEM,
3855 "Invalid ether spec/mask");
3859 if (eth_spec && eth_mask) {
3860 /* DST address of inner MAC shouldn't be masked.
3861 * SRC address of Inner MAC should be masked.
3863 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
3864 !rte_is_zero_ether_addr(&eth_mask->src) ||
3866 rte_flow_error_set(error, EINVAL,
3867 RTE_FLOW_ERROR_TYPE_ITEM,
3869 "Invalid ether spec/mask");
/* Before the VXLAN item the ETH address is the outer MAC;
 * after it, the inner MAC (tracked via vxlan_flag).
 */
3874 rte_memcpy(&filter->outer_mac,
3876 RTE_ETHER_ADDR_LEN);
3877 filter_type |= ETH_TUNNEL_FILTER_OMAC;
3879 rte_memcpy(&filter->inner_mac,
3881 RTE_ETHER_ADDR_LEN);
3882 filter_type |= ETH_TUNNEL_FILTER_IMAC;
3886 case RTE_FLOW_ITEM_TYPE_VLAN:
3887 vlan_spec = item->spec;
3888 vlan_mask = item->mask;
3889 if (!(vlan_spec && vlan_mask) ||
3890 vlan_mask->inner_type) {
3891 rte_flow_error_set(error, EINVAL,
3892 RTE_FLOW_ERROR_TYPE_ITEM,
3894 "Invalid vlan item");
3898 if (vlan_spec && vlan_mask) {
3899 if (vlan_mask->tci ==
3900 rte_cpu_to_be_16(I40E_TCI_MASK))
3901 filter->inner_vlan =
3902 rte_be_to_cpu_16(vlan_spec->tci) &
3904 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
3907 case RTE_FLOW_ITEM_TYPE_IPV4:
3908 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
3909 /* IPv4 is used to describe protocol,
3910 * spec and mask should be NULL.
3912 if (item->spec || item->mask) {
3913 rte_flow_error_set(error, EINVAL,
3914 RTE_FLOW_ERROR_TYPE_ITEM,
3916 "Invalid IPv4 item");
3920 case RTE_FLOW_ITEM_TYPE_IPV6:
3921 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
3922 /* IPv6 is used to describe protocol,
3923 * spec and mask should be NULL.
3925 if (item->spec || item->mask) {
3926 rte_flow_error_set(error, EINVAL,
3927 RTE_FLOW_ERROR_TYPE_ITEM,
3929 "Invalid IPv6 item");
3933 case RTE_FLOW_ITEM_TYPE_UDP:
3934 /* UDP is used to describe protocol,
3935 * spec and mask should be NULL.
3937 if (item->spec || item->mask) {
3938 rte_flow_error_set(error, EINVAL,
3939 RTE_FLOW_ERROR_TYPE_ITEM,
3941 "Invalid UDP item");
3945 case RTE_FLOW_ITEM_TYPE_VXLAN:
3946 vxlan_spec = item->spec;
3947 vxlan_mask = item->mask;
3948 /* Check if VXLAN item is used to describe protocol.
3949 * If yes, both spec and mask should be NULL.
3950 * If no, both spec and mask shouldn't be NULL.
3952 if ((!vxlan_spec && vxlan_mask) ||
3953 (vxlan_spec && !vxlan_mask)) {
3954 rte_flow_error_set(error, EINVAL,
3955 RTE_FLOW_ERROR_TYPE_ITEM,
3957 "Invalid VXLAN item");
3961 /* Check if VNI is masked. */
3962 if (vxlan_spec && vxlan_mask) {
3964 !!memcmp(vxlan_mask->vni, vni_mask,
3966 if (is_vni_masked) {
3967 rte_flow_error_set(error, EINVAL,
3968 RTE_FLOW_ERROR_TYPE_ITEM,
3970 "Invalid VNI mask");
/* 24-bit VNI: copy into the low 3 bytes of a big-endian
 * 32-bit value, then convert to host order.
 */
3974 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
3975 vxlan_spec->vni, 3);
3977 rte_be_to_cpu_32(tenant_id_be);
3978 filter_type |= ETH_TUNNEL_FILTER_TENID;
3988 ret = i40e_check_tunnel_filter_type(filter_type);
3990 rte_flow_error_set(error, EINVAL,
3991 RTE_FLOW_ERROR_TYPE_ITEM,
3993 "Invalid filter type");
3996 filter->filter_type = filter_type;
3998 filter->tunnel_type = I40E_TUNNEL_TYPE_VXLAN;
/* Glue for VXLAN tunnel filters: parse pattern, actions and attributes
 * into the consistent tunnel filter, then mark the consumed filter type.
 * NOTE(review): elided listing -- error checks between calls not visible.
 */
4004 i40e_flow_parse_vxlan_filter(struct rte_eth_dev *dev,
4005 const struct rte_flow_attr *attr,
4006 const struct rte_flow_item pattern[],
4007 const struct rte_flow_action actions[],
4008 struct rte_flow_error *error,
4009 union i40e_filter_t *filter)
4011 struct i40e_tunnel_filter_conf *tunnel_filter =
4012 &filter->consistent_tunnel_filter;
4015 ret = i40e_flow_parse_vxlan_pattern(dev, pattern,
4016 error, tunnel_filter);
4020 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4024 ret = i40e_flow_parse_attr(attr, error);
4028 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4033 /* 1. Last in item should be NULL as range is not supported.
4034 * 2. Supported filter types: IMAC_IVLAN_TENID, IMAC_IVLAN,
4035 * IMAC_TENID, OMAC_TENID_IMAC and IMAC.
4036 * 3. Mask of fields which need to be matched should be
4038 * 4. Mask of fields which needn't to be matched should be
/* NVGRE counterpart of the VXLAN parser: ETH / [VLAN] / IPv4|IPv6 /
 * NVGRE items; the 24-bit TNI becomes the tenant id and the
 * c_k_s_rsvd0_ver/protocol fields are constrained to NVGRE values.
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
4042 i40e_flow_parse_nvgre_pattern(__rte_unused struct rte_eth_dev *dev,
4043 const struct rte_flow_item *pattern,
4044 struct rte_flow_error *error,
4045 struct i40e_tunnel_filter_conf *filter)
4047 const struct rte_flow_item *item = pattern;
4048 const struct rte_flow_item_eth *eth_spec;
4049 const struct rte_flow_item_eth *eth_mask;
4050 const struct rte_flow_item_nvgre *nvgre_spec;
4051 const struct rte_flow_item_nvgre *nvgre_mask;
4052 const struct rte_flow_item_vlan *vlan_spec;
4053 const struct rte_flow_item_vlan *vlan_mask;
4054 enum rte_flow_item_type item_type;
4055 uint8_t filter_type = 0;
4056 bool is_tni_masked = 0;
4057 uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
4058 bool nvgre_flag = 0;
4059 uint32_t tenant_id_be = 0;
4062 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4064 rte_flow_error_set(error, EINVAL,
4065 RTE_FLOW_ERROR_TYPE_ITEM,
4067 "Not support range");
4070 item_type = item->type;
4071 switch (item_type) {
4072 case RTE_FLOW_ITEM_TYPE_ETH:
4073 eth_spec = item->spec;
4074 eth_mask = item->mask;
4076 /* Check if ETH item is used for place holder.
4077 * If yes, both spec and mask should be NULL.
4078 * If no, both spec and mask shouldn't be NULL.
4080 if ((!eth_spec && eth_mask) ||
4081 (eth_spec && !eth_mask)) {
4082 rte_flow_error_set(error, EINVAL,
4083 RTE_FLOW_ERROR_TYPE_ITEM,
4085 "Invalid ether spec/mask");
4089 if (eth_spec && eth_mask) {
4090 /* DST address of inner MAC shouldn't be masked.
4091 * SRC address of Inner MAC should be masked.
4093 if (!rte_is_broadcast_ether_addr(&eth_mask->dst) ||
4094 !rte_is_zero_ether_addr(&eth_mask->src) ||
4096 rte_flow_error_set(error, EINVAL,
4097 RTE_FLOW_ERROR_TYPE_ITEM,
4099 "Invalid ether spec/mask");
/* Outer MAC before the NVGRE item, inner MAC after it
 * (tracked via nvgre_flag).
 */
4104 rte_memcpy(&filter->outer_mac,
4106 RTE_ETHER_ADDR_LEN);
4107 filter_type |= ETH_TUNNEL_FILTER_OMAC;
4109 rte_memcpy(&filter->inner_mac,
4111 RTE_ETHER_ADDR_LEN);
4112 filter_type |= ETH_TUNNEL_FILTER_IMAC;
4117 case RTE_FLOW_ITEM_TYPE_VLAN:
4118 vlan_spec = item->spec;
4119 vlan_mask = item->mask;
4120 if (!(vlan_spec && vlan_mask) ||
4121 vlan_mask->inner_type) {
4122 rte_flow_error_set(error, EINVAL,
4123 RTE_FLOW_ERROR_TYPE_ITEM,
4125 "Invalid vlan item");
4129 if (vlan_spec && vlan_mask) {
4130 if (vlan_mask->tci ==
4131 rte_cpu_to_be_16(I40E_TCI_MASK))
4132 filter->inner_vlan =
4133 rte_be_to_cpu_16(vlan_spec->tci) &
4135 filter_type |= ETH_TUNNEL_FILTER_IVLAN;
4138 case RTE_FLOW_ITEM_TYPE_IPV4:
4139 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4140 /* IPv4 is used to describe protocol,
4141 * spec and mask should be NULL.
4143 if (item->spec || item->mask) {
4144 rte_flow_error_set(error, EINVAL,
4145 RTE_FLOW_ERROR_TYPE_ITEM,
4147 "Invalid IPv4 item");
4151 case RTE_FLOW_ITEM_TYPE_IPV6:
4152 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4153 /* IPv6 is used to describe protocol,
4154 * spec and mask should be NULL.
4156 if (item->spec || item->mask) {
4157 rte_flow_error_set(error, EINVAL,
4158 RTE_FLOW_ERROR_TYPE_ITEM,
4160 "Invalid IPv6 item");
4164 case RTE_FLOW_ITEM_TYPE_NVGRE:
4165 nvgre_spec = item->spec;
4166 nvgre_mask = item->mask;
4167 /* Check if NVGRE item is used to describe protocol.
4168 * If yes, both spec and mask should be NULL.
4169 * If no, both spec and mask shouldn't be NULL.
4171 if ((!nvgre_spec && nvgre_mask) ||
4172 (nvgre_spec && !nvgre_mask)) {
4173 rte_flow_error_set(error, EINVAL,
4174 RTE_FLOW_ERROR_TYPE_ITEM,
4176 "Invalid NVGRE item");
4180 if (nvgre_spec && nvgre_mask) {
4182 !!memcmp(nvgre_mask->tni, tni_mask,
4184 if (is_tni_masked) {
4185 rte_flow_error_set(error, EINVAL,
4186 RTE_FLOW_ERROR_TYPE_ITEM,
4188 "Invalid TNI mask");
/* Protocol, if masked at all, must be fully masked. */
4191 if (nvgre_mask->protocol &&
4192 nvgre_mask->protocol != 0xFFFF) {
4193 rte_flow_error_set(error, EINVAL,
4194 RTE_FLOW_ERROR_TYPE_ITEM,
4196 "Invalid NVGRE item");
4199 if (nvgre_mask->c_k_s_rsvd0_ver &&
4200 nvgre_mask->c_k_s_rsvd0_ver !=
4201 rte_cpu_to_be_16(0xFFFF)) {
4202 rte_flow_error_set(error, EINVAL,
4203 RTE_FLOW_ERROR_TYPE_ITEM,
4205 "Invalid NVGRE item");
/* 0x2000 = key-present flag; NVGRE requires the key bit set. */
4208 if (nvgre_spec->c_k_s_rsvd0_ver !=
4209 rte_cpu_to_be_16(0x2000) &&
4210 nvgre_mask->c_k_s_rsvd0_ver) {
4211 rte_flow_error_set(error, EINVAL,
4212 RTE_FLOW_ERROR_TYPE_ITEM,
4214 "Invalid NVGRE item");
/* 0x6558 = Transparent Ethernet Bridging ethertype. */
4217 if (nvgre_mask->protocol &&
4218 nvgre_spec->protocol !=
4219 rte_cpu_to_be_16(0x6558)) {
4220 rte_flow_error_set(error, EINVAL,
4221 RTE_FLOW_ERROR_TYPE_ITEM,
4223 "Invalid NVGRE item");
4226 rte_memcpy(((uint8_t *)&tenant_id_be + 1),
4227 nvgre_spec->tni, 3);
4229 rte_be_to_cpu_32(tenant_id_be);
4230 filter_type |= ETH_TUNNEL_FILTER_TENID;
4240 ret = i40e_check_tunnel_filter_type(filter_type);
4242 rte_flow_error_set(error, EINVAL,
4243 RTE_FLOW_ERROR_TYPE_ITEM,
4245 "Invalid filter type");
4248 filter->filter_type = filter_type;
4250 filter->tunnel_type = I40E_TUNNEL_TYPE_NVGRE;
/* Glue for NVGRE tunnel filters: parse pattern, actions and attributes
 * into the consistent tunnel filter, then mark the consumed filter type.
 * NOTE(review): elided listing -- error checks between calls not visible.
 */
4256 i40e_flow_parse_nvgre_filter(struct rte_eth_dev *dev,
4257 const struct rte_flow_attr *attr,
4258 const struct rte_flow_item pattern[],
4259 const struct rte_flow_action actions[],
4260 struct rte_flow_error *error,
4261 union i40e_filter_t *filter)
4263 struct i40e_tunnel_filter_conf *tunnel_filter =
4264 &filter->consistent_tunnel_filter;
4267 ret = i40e_flow_parse_nvgre_pattern(dev, pattern,
4268 error, tunnel_filter);
4272 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4276 ret = i40e_flow_parse_attr(attr, error);
4280 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4285 /* 1. Last in item should be NULL as range is not supported.
4286 * 2. Supported filter types: MPLS label.
4287 * 3. Mask of fields which need to be matched should be
4289 * 4. Mask of fields which needn't to be matched should be
/* Parses ETH / IPv4|IPv6 / UDP|GRE / MPLS into an MPLSoUDP or MPLSoGRE
 * tunnel filter; the 20-bit MPLS label becomes the tenant id.
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
4293 i40e_flow_parse_mpls_pattern(__rte_unused struct rte_eth_dev *dev,
4294 const struct rte_flow_item *pattern,
4295 struct rte_flow_error *error,
4296 struct i40e_tunnel_filter_conf *filter)
4298 const struct rte_flow_item *item = pattern;
4299 const struct rte_flow_item_mpls *mpls_spec;
4300 const struct rte_flow_item_mpls *mpls_mask;
4301 enum rte_flow_item_type item_type;
4302 bool is_mplsoudp = 0; /* 1 - MPLSoUDP, 0 - MPLSoGRE */
4303 const uint8_t label_mask[3] = {0xFF, 0xFF, 0xF0};
4304 uint32_t label_be = 0;
4306 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4308 rte_flow_error_set(error, EINVAL,
4309 RTE_FLOW_ERROR_TYPE_ITEM,
4311 "Not support range");
4314 item_type = item->type;
4315 switch (item_type) {
4316 case RTE_FLOW_ITEM_TYPE_ETH:
4317 if (item->spec || item->mask) {
4318 rte_flow_error_set(error, EINVAL,
4319 RTE_FLOW_ERROR_TYPE_ITEM,
4321 "Invalid ETH item");
4325 case RTE_FLOW_ITEM_TYPE_IPV4:
4326 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4327 /* IPv4 is used to describe protocol,
4328 * spec and mask should be NULL.
4330 if (item->spec || item->mask) {
4331 rte_flow_error_set(error, EINVAL,
4332 RTE_FLOW_ERROR_TYPE_ITEM,
4334 "Invalid IPv4 item");
4338 case RTE_FLOW_ITEM_TYPE_IPV6:
4339 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV6;
4340 /* IPv6 is used to describe protocol,
4341 * spec and mask should be NULL.
4343 if (item->spec || item->mask) {
4344 rte_flow_error_set(error, EINVAL,
4345 RTE_FLOW_ERROR_TYPE_ITEM,
4347 "Invalid IPv6 item");
4351 case RTE_FLOW_ITEM_TYPE_UDP:
4352 /* UDP is used to describe protocol,
4353 * spec and mask should be NULL.
4355 if (item->spec || item->mask) {
4356 rte_flow_error_set(error, EINVAL,
4357 RTE_FLOW_ERROR_TYPE_ITEM,
4359 "Invalid UDP item");
4364 case RTE_FLOW_ITEM_TYPE_GRE:
4365 /* GRE is used to describe protocol,
4366 * spec and mask should be NULL.
4368 if (item->spec || item->mask) {
4369 rte_flow_error_set(error, EINVAL,
4370 RTE_FLOW_ERROR_TYPE_ITEM,
4372 "Invalid GRE item");
4376 case RTE_FLOW_ITEM_TYPE_MPLS:
4377 mpls_spec = item->spec;
4378 mpls_mask = item->mask;
4380 if (!mpls_spec || !mpls_mask) {
4381 rte_flow_error_set(error, EINVAL,
4382 RTE_FLOW_ERROR_TYPE_ITEM,
4384 "Invalid MPLS item");
/* Full 20-bit label must be matched: mask FF FF F0. */
4388 if (memcmp(mpls_mask->label_tc_s, label_mask, 3)) {
4389 rte_flow_error_set(error, EINVAL,
4390 RTE_FLOW_ERROR_TYPE_ITEM,
4392 "Invalid MPLS label mask");
4395 rte_memcpy(((uint8_t *)&label_be + 1),
4396 mpls_spec->label_tc_s, 3);
/* Drop the 4 TC/S bits to keep only the 20-bit label. */
4397 filter->tenant_id = rte_be_to_cpu_32(label_be) >> 4;
4405 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoUDP;
4407 filter->tunnel_type = I40E_TUNNEL_TYPE_MPLSoGRE;
/* Glue for MPLSoUDP/MPLSoGRE tunnel filters: parse pattern, actions and
 * attributes into the consistent tunnel filter, then mark the consumed type.
 * NOTE(review): elided listing -- error checks between calls not visible.
 */
4413 i40e_flow_parse_mpls_filter(struct rte_eth_dev *dev,
4414 const struct rte_flow_attr *attr,
4415 const struct rte_flow_item pattern[],
4416 const struct rte_flow_action actions[],
4417 struct rte_flow_error *error,
4418 union i40e_filter_t *filter)
4420 struct i40e_tunnel_filter_conf *tunnel_filter =
4421 &filter->consistent_tunnel_filter;
4424 ret = i40e_flow_parse_mpls_pattern(dev, pattern,
4425 error, tunnel_filter);
4429 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4433 ret = i40e_flow_parse_attr(attr, error);
4437 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4442 /* 1. Last in item should be NULL as range is not supported.
4443 * 2. Supported filter types: GTP TEID.
4444 * 3. Mask of fields which need to be matched should be
4446 * 4. Mask of fields which needn't to be matched should be
4448 * 5. GTP profile supports GTPv1 only.
4449 * 6. GTP-C response message ('source_port' = 2123) is not supported.
/* Parses ETH / IPv4 / UDP / GTPC|GTPU into a GTP tunnel filter; requires
 * the device's GTP pctype profile (pf->gtp_support) and a fully-masked
 * TEID, which becomes the tenant id.
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
4452 i40e_flow_parse_gtp_pattern(struct rte_eth_dev *dev,
4453 const struct rte_flow_item *pattern,
4454 struct rte_flow_error *error,
4455 struct i40e_tunnel_filter_conf *filter)
4457 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4458 const struct rte_flow_item *item = pattern;
4459 const struct rte_flow_item_gtp *gtp_spec;
4460 const struct rte_flow_item_gtp *gtp_mask;
4461 enum rte_flow_item_type item_type;
4463 if (!pf->gtp_support) {
4464 rte_flow_error_set(error, EINVAL,
4465 RTE_FLOW_ERROR_TYPE_ITEM,
4467 "GTP is not supported by default.");
4471 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4473 rte_flow_error_set(error, EINVAL,
4474 RTE_FLOW_ERROR_TYPE_ITEM,
4476 "Not support range");
4479 item_type = item->type;
4480 switch (item_type) {
4481 case RTE_FLOW_ITEM_TYPE_ETH:
4482 if (item->spec || item->mask) {
4483 rte_flow_error_set(error, EINVAL,
4484 RTE_FLOW_ERROR_TYPE_ITEM,
4486 "Invalid ETH item");
4490 case RTE_FLOW_ITEM_TYPE_IPV4:
4491 filter->ip_type = I40E_TUNNEL_IPTYPE_IPV4;
4492 /* IPv4 is used to describe protocol,
4493 * spec and mask should be NULL.
4495 if (item->spec || item->mask) {
4496 rte_flow_error_set(error, EINVAL,
4497 RTE_FLOW_ERROR_TYPE_ITEM,
4499 "Invalid IPv4 item");
4503 case RTE_FLOW_ITEM_TYPE_UDP:
4504 if (item->spec || item->mask) {
4505 rte_flow_error_set(error, EINVAL,
4506 RTE_FLOW_ERROR_TYPE_ITEM,
4508 "Invalid UDP item");
4512 case RTE_FLOW_ITEM_TYPE_GTPC:
4513 case RTE_FLOW_ITEM_TYPE_GTPU:
4514 gtp_spec = item->spec;
4515 gtp_mask = item->mask;
4517 if (!gtp_spec || !gtp_mask) {
4518 rte_flow_error_set(error, EINVAL,
4519 RTE_FLOW_ERROR_TYPE_ITEM,
4521 "Invalid GTP item");
/* Only the TEID may be matched, and it must be fully masked. */
4525 if (gtp_mask->v_pt_rsv_flags ||
4526 gtp_mask->msg_type ||
4527 gtp_mask->msg_len ||
4528 gtp_mask->teid != UINT32_MAX) {
4529 rte_flow_error_set(error, EINVAL,
4530 RTE_FLOW_ERROR_TYPE_ITEM,
4532 "Invalid GTP mask");
4536 if (item_type == RTE_FLOW_ITEM_TYPE_GTPC)
4537 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPC;
4538 else if (item_type == RTE_FLOW_ITEM_TYPE_GTPU)
4539 filter->tunnel_type = I40E_TUNNEL_TYPE_GTPU;
4541 filter->tenant_id = rte_be_to_cpu_32(gtp_spec->teid);
/* Glue for GTP-C/GTP-U tunnel filters: parse pattern, actions and
 * attributes into the consistent tunnel filter, then mark the consumed type.
 * NOTE(review): elided listing -- error checks between calls not visible.
 */
4553 i40e_flow_parse_gtp_filter(struct rte_eth_dev *dev,
4554 const struct rte_flow_attr *attr,
4555 const struct rte_flow_item pattern[],
4556 const struct rte_flow_action actions[],
4557 struct rte_flow_error *error,
4558 union i40e_filter_t *filter)
4560 struct i40e_tunnel_filter_conf *tunnel_filter =
4561 &filter->consistent_tunnel_filter;
4564 ret = i40e_flow_parse_gtp_pattern(dev, pattern,
4565 error, tunnel_filter);
4569 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4573 ret = i40e_flow_parse_attr(attr, error);
4577 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4582 /* 1. Last in item should be NULL as range is not supported.
4583 * 2. Supported filter types: QINQ.
4584 * 3. Mask of fields which need to be matched should be
4586 * 4. Mask of fields which needn't to be matched should be
/* Parses ETH / VLAN / VLAN into a QinQ tunnel filter: the first VLAN item
 * is the outer tag, the second the inner tag; both TCIs must be fully
 * masked (I40E_TCI_MASK).
 * NOTE(review): elided listing -- returns/breaks/braces are missing
 * between visible lines; verify against the full source.
 */
4590 i40e_flow_parse_qinq_pattern(__rte_unused struct rte_eth_dev *dev,
4591 const struct rte_flow_item *pattern,
4592 struct rte_flow_error *error,
4593 struct i40e_tunnel_filter_conf *filter)
4595 const struct rte_flow_item *item = pattern;
4596 const struct rte_flow_item_vlan *vlan_spec = NULL;
4597 const struct rte_flow_item_vlan *vlan_mask = NULL;
4598 const struct rte_flow_item_vlan *i_vlan_spec = NULL;
4599 const struct rte_flow_item_vlan *i_vlan_mask = NULL;
4600 const struct rte_flow_item_vlan *o_vlan_spec = NULL;
4601 const struct rte_flow_item_vlan *o_vlan_mask = NULL;
4603 enum rte_flow_item_type item_type;
4606 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4608 rte_flow_error_set(error, EINVAL,
4609 RTE_FLOW_ERROR_TYPE_ITEM,
4611 "Not support range");
4614 item_type = item->type;
4615 switch (item_type) {
4616 case RTE_FLOW_ITEM_TYPE_ETH:
4617 if (item->spec || item->mask) {
4618 rte_flow_error_set(error, EINVAL,
4619 RTE_FLOW_ERROR_TYPE_ITEM,
4621 "Invalid ETH item");
4625 case RTE_FLOW_ITEM_TYPE_VLAN:
4626 vlan_spec = item->spec;
4627 vlan_mask = item->mask;
4629 if (!(vlan_spec && vlan_mask) ||
4630 vlan_mask->inner_type) {
4631 rte_flow_error_set(error, EINVAL,
4632 RTE_FLOW_ERROR_TYPE_ITEM,
4634 "Invalid vlan item");
/* First VLAN item seen is the outer tag, second the inner. */
4639 o_vlan_spec = vlan_spec;
4640 o_vlan_mask = vlan_mask;
4643 i_vlan_spec = vlan_spec;
4644 i_vlan_mask = vlan_mask;
4654 /* Get filter specification */
4655 if ((o_vlan_mask != NULL) && (o_vlan_mask->tci ==
4656 rte_cpu_to_be_16(I40E_TCI_MASK)) &&
4657 (i_vlan_mask != NULL) &&
4658 (i_vlan_mask->tci == rte_cpu_to_be_16(I40E_TCI_MASK))) {
4659 filter->outer_vlan = rte_be_to_cpu_16(o_vlan_spec->tci)
4661 filter->inner_vlan = rte_be_to_cpu_16(i_vlan_spec->tci)
4664 rte_flow_error_set(error, EINVAL,
4665 RTE_FLOW_ERROR_TYPE_ITEM,
4667 "Invalid filter type");
4671 filter->tunnel_type = I40E_TUNNEL_TYPE_QINQ;
/* Glue for QinQ tunnel filters: parse pattern, actions and attributes
 * into the consistent tunnel filter, then mark the consumed filter type.
 * NOTE(review): elided listing -- error checks between calls not visible.
 */
4676 i40e_flow_parse_qinq_filter(struct rte_eth_dev *dev,
4677 const struct rte_flow_attr *attr,
4678 const struct rte_flow_item pattern[],
4679 const struct rte_flow_action actions[],
4680 struct rte_flow_error *error,
4681 union i40e_filter_t *filter)
4683 struct i40e_tunnel_filter_conf *tunnel_filter =
4684 &filter->consistent_tunnel_filter;
4687 ret = i40e_flow_parse_qinq_pattern(dev, pattern,
4688 error, tunnel_filter);
4692 ret = i40e_flow_parse_tunnel_action(dev, actions, error, tunnel_filter);
4696 ret = i40e_flow_parse_attr(attr, error);
4700 cons_filter_type = RTE_ETH_FILTER_TUNNEL;
4706 * This function is used to do configuration i40e existing RSS with rte_flow.
4707 * It also enable queue region configuration using flow API for i40e.
4708 * pattern can be used indicate what parameters will be include in flow,
4709 * like user_priority or flowtype for queue region or HASH function for RSS.
4710 * Action is used to transmit parameter like queue index and HASH
4711 * function for RSS, or flowtype for queue region configuration.
4714 * Case 1: try to transform patterns to pctype. valid pctype will be
4715 * used in parse action.
4716 * Case 2: only ETH, indicate flowtype for queue region will be parsed.
4717 * Case 3: only VLAN, indicate user_priority for queue region will be parsed.
4718 * So, pattern choice is depened on the purpose of configuration of
4721 * action RSS will be used to transmit valid parameter with
4722 * struct rte_flow_action_rss for all the 3 case.
4725 i40e_flow_parse_rss_pattern(__rte_unused struct rte_eth_dev *dev,
4726 const struct rte_flow_item *pattern,
4727 struct rte_flow_error *error,
4728 struct i40e_rss_pattern_info *p_info,
4729 struct i40e_queue_regions *info)
4731 const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
4732 const struct rte_flow_item *item = pattern;
4733 enum rte_flow_item_type item_type;
4734 struct rte_flow_item *items;
4735 uint32_t item_num = 0; /* non-void item number of pattern*/
/*
 * Static lookup table mapping known static item patterns to the RSS
 * offload type bitmask they correspond to.
 */
4737 static const struct {
4738 enum rte_flow_item_type *item_array;
4740 } i40e_rss_pctype_patterns[] = {
4741 { pattern_fdir_ipv4,
4742 ETH_RSS_FRAG_IPV4 | ETH_RSS_NONFRAG_IPV4_OTHER },
4743 { pattern_fdir_ipv4_tcp, ETH_RSS_NONFRAG_IPV4_TCP },
4744 { pattern_fdir_ipv4_udp, ETH_RSS_NONFRAG_IPV4_UDP },
4745 { pattern_fdir_ipv4_sctp, ETH_RSS_NONFRAG_IPV4_SCTP },
4746 { pattern_fdir_ipv4_esp, ETH_RSS_ESP },
4747 { pattern_fdir_ipv4_udp_esp, ETH_RSS_ESP },
4748 { pattern_fdir_ipv6,
4749 ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_OTHER },
4750 { pattern_fdir_ipv6_tcp, ETH_RSS_NONFRAG_IPV6_TCP },
4751 { pattern_fdir_ipv6_udp, ETH_RSS_NONFRAG_IPV6_UDP },
4752 { pattern_fdir_ipv6_sctp, ETH_RSS_NONFRAG_IPV6_SCTP },
4753 { pattern_ethertype, ETH_RSS_L2_PAYLOAD },
4754 { pattern_fdir_ipv6_esp, ETH_RSS_ESP },
4755 { pattern_fdir_ipv6_udp_esp, ETH_RSS_ESP },
/* Default to invalid until a table entry (or empty pattern) matches. */
4758 p_info->types = I40E_RSS_TYPE_INVALID;
4760 if (item->type == RTE_FLOW_ITEM_TYPE_END) {
/* An empty pattern is legal: no pctype constraint. */
4761 p_info->types = I40E_RSS_TYPE_NONE;
4765 /* Convert pattern to RSS offload types */
4766 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
4767 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
/* Temporary VOID-stripped copy of the pattern for table matching. */
4773 items = rte_zmalloc("i40e_pattern",
4774 item_num * sizeof(struct rte_flow_item), 0);
4776 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
4777 NULL, "No memory for PMD internal items.");
4781 i40e_pattern_skip_void_item(items, pattern);
4783 for (i = 0; i < RTE_DIM(i40e_rss_pctype_patterns); i++) {
4784 if (i40e_match_pattern(i40e_rss_pctype_patterns[i].item_array,
4786 p_info->types = i40e_rss_pctype_patterns[i].type;
/* Walk the raw pattern again for queue-region specific items. */
4793 for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
4795 rte_flow_error_set(error, EINVAL,
4796 RTE_FLOW_ERROR_TYPE_ITEM,
4798 "Not support range");
4801 item_type = item->type;
4802 switch (item_type) {
4803 case RTE_FLOW_ITEM_TYPE_ETH:
4804 p_info->action_flag = 1;
4806 case RTE_FLOW_ITEM_TYPE_VLAN:
4807 vlan_spec = item->spec;
4808 vlan_mask = item->mask;
4809 if (vlan_spec && vlan_mask) {
4810 if (vlan_mask->tci ==
4811 rte_cpu_to_be_16(I40E_TCI_MASK)) {
/* Full TCI mask: extract the 3 PCP bits as user priority. */
4812 info->region[0].user_priority[0] =
4814 vlan_spec->tci) >> 13) & 0x7;
4815 info->region[0].user_priority_num = 1;
4816 info->queue_region_number = 1;
4817 p_info->action_flag = 0;
4822 p_info->action_flag = 0;
4823 memset(info, 0, sizeof(struct i40e_queue_regions));
4832 * This function is used to parse the RSS queue index, total queue number and
4833 * hash functions. If the purpose of this configuration is queue region
4834 * configuration, it will set the queue_region_conf flag to TRUE, else to FALSE.
4835 * In queue region configuration, it also needs to parse the hardware flowtype
4836 * and user_priority from the configuration, and it will also check the validity
4837 * of these parameters. For example, the queue region sizes should
4838 * be any of the following values: 1, 2, 4, 8, 16, 32, 64, the
4839 * hw_flowtype or PCTYPE max index should be 63, the user priority
4840 * max index should be 7, and so on. Also, the queue index should be a
4841 * continuous sequence and the queue region index should be part of the RSS
4842 * queue index for this port.
4843 * For hash params, the pctype in action and pattern must be the same.
4844 * Setting the queue index requires the RSS types to be empty.
4847 i40e_flow_parse_rss_action(struct rte_eth_dev *dev,
4848 const struct rte_flow_action *actions,
4849 struct rte_flow_error *error,
4850 struct i40e_rss_pattern_info p_info,
4851 struct i40e_queue_regions *conf_info,
4852 union i40e_filter_t *filter)
4854 const struct rte_flow_action *act;
4855 const struct rte_flow_action_rss *rss;
4856 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
4857 struct i40e_queue_regions *info = &pf->queue_region;
4858 struct i40e_rte_flow_rss_conf *rss_config =
4860 struct i40e_rte_flow_rss_conf *rss_info = &pf->rss_info;
4861 uint16_t i, j, n, tmp, nb_types;
4863 uint64_t hf_bit = 1;
/* Map RSS offload type bits to hardware packet-classifier types. */
4865 static const struct {
4867 enum i40e_filter_pctype pctype;
4868 } pctype_match_table[] = {
4870 I40E_FILTER_PCTYPE_FRAG_IPV4},
4871 {ETH_RSS_NONFRAG_IPV4_TCP,
4872 I40E_FILTER_PCTYPE_NONF_IPV4_TCP},
4873 {ETH_RSS_NONFRAG_IPV4_UDP,
4874 I40E_FILTER_PCTYPE_NONF_IPV4_UDP},
4875 {ETH_RSS_NONFRAG_IPV4_SCTP,
4876 I40E_FILTER_PCTYPE_NONF_IPV4_SCTP},
4877 {ETH_RSS_NONFRAG_IPV4_OTHER,
4878 I40E_FILTER_PCTYPE_NONF_IPV4_OTHER},
4880 I40E_FILTER_PCTYPE_FRAG_IPV6},
4881 {ETH_RSS_NONFRAG_IPV6_TCP,
4882 I40E_FILTER_PCTYPE_NONF_IPV6_TCP},
4883 {ETH_RSS_NONFRAG_IPV6_UDP,
4884 I40E_FILTER_PCTYPE_NONF_IPV6_UDP},
4885 {ETH_RSS_NONFRAG_IPV6_SCTP,
4886 I40E_FILTER_PCTYPE_NONF_IPV6_SCTP},
4887 {ETH_RSS_NONFRAG_IPV6_OTHER,
4888 I40E_FILTER_PCTYPE_NONF_IPV6_OTHER},
4889 {ETH_RSS_L2_PAYLOAD,
4890 I40E_FILTER_PCTYPE_L2_PAYLOAD},
4893 NEXT_ITEM_OF_ACTION(act, actions, index);
4897 * RSS only supports forwarding,
4898 * check if the first not void action is RSS.
4900 if (act->type != RTE_FLOW_ACTION_TYPE_RSS) {
4901 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
4902 rte_flow_error_set(error, EINVAL,
4903 RTE_FLOW_ERROR_TYPE_ACTION,
4904 act, "Not supported action.");
/* ETH-only pattern + queues: translate RSS types into a pctype. */
4908 if (p_info.action_flag && rss->queue_num) {
4909 for (j = 0; j < RTE_DIM(pctype_match_table); j++) {
4910 if (rss->types & pctype_match_table[j].rss_type) {
4911 conf_info->region[0].hw_flowtype[0] =
4912 (uint8_t)pctype_match_table[j].pctype;
4913 conf_info->region[0].flowtype_num = 1;
4914 conf_info->queue_region_number = 1;
4921 * Do some queue region related parameters check
4922 * in order to keep queue index for queue region to be
4923 * continuous sequence and also to be part of RSS
4924 * queue index for this port.
4926 if (conf_info->queue_region_number) {
4927 for (i = 0; i < rss->queue_num; i++) {
4928 for (j = 0; j < rss_info->conf.queue_num; j++) {
4929 if (rss->queue[i] == rss_info->conf.queue[j])
4932 if (j == rss_info->conf.queue_num) {
4933 rte_flow_error_set(error, EINVAL,
4934 RTE_FLOW_ERROR_TYPE_ACTION,
/* Region queues must form a contiguous run of queue ids. */
4941 for (i = 0; i < rss->queue_num - 1; i++) {
4942 if (rss->queue[i + 1] != rss->queue[i] + 1) {
4943 rte_flow_error_set(error, EINVAL,
4944 RTE_FLOW_ERROR_TYPE_ACTION,
4952 /* Parse queue region related parameters from configuration */
4953 for (n = 0; n < conf_info->queue_region_number; n++) {
4954 if (conf_info->region[n].user_priority_num ||
4955 conf_info->region[n].flowtype_num) {
/* Region size must be a power of two, capped at 64 queues. */
4956 if (!((rte_is_power_of_2(rss->queue_num)) &&
4957 rss->queue_num <= 64)) {
4958 rte_flow_error_set(error, EINVAL,
4959 RTE_FLOW_ERROR_TYPE_ACTION,
4961 "The region sizes should be any of the following values: 1, 2, 4, 8, 16, 32, 64 as long as the "
4962 "total number of queues do not exceed the VSI allocation");
4966 if (conf_info->region[n].user_priority[n] >=
4967 I40E_MAX_USER_PRIORITY) {
4968 rte_flow_error_set(error, EINVAL,
4969 RTE_FLOW_ERROR_TYPE_ACTION,
4971 "the user priority max index is 7");
4975 if (conf_info->region[n].hw_flowtype[n] >=
4976 I40E_FILTER_PCTYPE_MAX) {
4977 rte_flow_error_set(error, EINVAL,
4978 RTE_FLOW_ERROR_TYPE_ACTION,
4980 "the hw_flowtype or PCTYPE max index is 63");
/* Look for an already-configured region with the same geometry. */
4984 for (i = 0; i < info->queue_region_number; i++) {
4985 if (info->region[i].queue_num ==
4987 info->region[i].queue_start_index ==
4992 if (i == info->queue_region_number) {
4993 if (i > I40E_REGION_MAX_INDEX) {
4994 rte_flow_error_set(error, EINVAL,
4995 RTE_FLOW_ERROR_TYPE_ACTION,
4997 "the queue region max index is 7");
/* New region: record its geometry and assign the next region id. */
5001 info->region[i].queue_num =
5003 info->region[i].queue_start_index =
5005 info->region[i].region_id =
5006 info->queue_region_number;
5008 j = info->region[i].user_priority_num;
5009 tmp = conf_info->region[n].user_priority[0];
5010 if (conf_info->region[n].user_priority_num) {
5011 info->region[i].user_priority[j] = tmp;
5012 info->region[i].user_priority_num++;
5015 j = info->region[i].flowtype_num;
5016 tmp = conf_info->region[n].hw_flowtype[0];
5017 if (conf_info->region[n].flowtype_num) {
5018 info->region[i].hw_flowtype[j] = tmp;
5019 info->region[i].flowtype_num++;
5021 info->queue_region_number++;
/* Existing region: just append the new priority/flowtype entries. */
5023 j = info->region[i].user_priority_num;
5024 tmp = conf_info->region[n].user_priority[0];
5025 if (conf_info->region[n].user_priority_num) {
5026 info->region[i].user_priority[j] = tmp;
5027 info->region[i].user_priority_num++;
5030 j = info->region[i].flowtype_num;
5031 tmp = conf_info->region[n].hw_flowtype[0];
5032 if (conf_info->region[n].flowtype_num) {
5033 info->region[i].hw_flowtype[j] = tmp;
5034 info->region[i].flowtype_num++;
5039 rss_config->queue_region_conf = TRUE;
5043 * Return function if this flow is used for queue region configuration
5045 if (rss_config->queue_region_conf)
5049 rte_flow_error_set(error, EINVAL,
5050 RTE_FLOW_ERROR_TYPE_ACTION,
/* All requested queues must exist on this port. */
5056 for (n = 0; n < rss->queue_num; n++) {
5057 if (rss->queue[n] >= dev->data->nb_rx_queues) {
5058 rte_flow_error_set(error, EINVAL,
5059 RTE_FLOW_ERROR_TYPE_ACTION,
5061 "queue id > max number of queues");
5066 if (rss->queue_num && (p_info.types || rss->types))
5067 return rte_flow_error_set
5068 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5069 "RSS types must be empty while configuring queue region");
5071 /* validate pattern and pctype */
5072 if (!(rss->types & p_info.types) &&
5073 (rss->types || p_info.types) && !rss->queue_num)
5074 return rte_flow_error_set
5075 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
5076 act, "invalid pctype");
/* Count requested type bits; only a single pctype is supported. */
5079 for (n = 0; n < RTE_ETH_FLOW_MAX; n++) {
5080 if (rss->types & (hf_bit << n))
5083 return rte_flow_error_set
5084 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
5085 act, "multi pctype is not supported");
5088 if (rss->func == RTE_ETH_HASH_FUNCTION_SIMPLE_XOR &&
5089 (p_info.types || rss->types || rss->queue_num))
5090 return rte_flow_error_set
5091 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5092 "pattern, type and queues must be empty while"
5093 " setting hash function as simple_xor");
5095 if (rss->func == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ &&
5096 !(p_info.types && rss->types))
5097 return rte_flow_error_set
5098 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5099 "pctype and queues can not be empty while"
5100 " setting hash function as symmetric toeplitz");
5102 /* Parse RSS related parameters from configuration */
5103 if (rss->func >= RTE_ETH_HASH_FUNCTION_MAX ||
5104 rss->func == RTE_ETH_HASH_FUNCTION_TOEPLITZ)
5105 return rte_flow_error_set
5106 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5107 "RSS hash functions are not supported");
5109 return rte_flow_error_set
5110 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5111 "a nonzero RSS encapsulation level is not supported");
5112 if (rss->key_len && rss->key_len > RTE_DIM(rss_config->key))
5113 return rte_flow_error_set
5114 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5115 "RSS hash key too large");
5116 if (rss->queue_num > RTE_DIM(rss_config->queue))
5117 return rte_flow_error_set
5118 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION, act,
5119 "too many queues for RSS context");
5120 if (i40e_rss_conf_init(rss_config, rss))
5121 return rte_flow_error_set
5122 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION, act,
5123 "RSS context initialization failure");
5127 /* check if the next not void action is END */
5128 NEXT_ITEM_OF_ACTION(act, actions, index);
5129 if (act->type != RTE_FLOW_ACTION_TYPE_END) {
5130 memset(rss_config, 0, sizeof(struct i40e_rte_flow_rss_conf));
5131 rte_flow_error_set(error, EINVAL,
5132 RTE_FLOW_ERROR_TYPE_ACTION,
5133 act, "Not supported action.");
/* Reaching here means this is a plain RSS rule, not a queue region. */
5136 rss_config->queue_region_conf = FALSE;
/*
 * Top-level RSS flow parser: zero the scratch structures, parse the
 * pattern, then the action, then the attributes.  On success the
 * consumed filter type is recorded as RTE_ETH_FILTER_HASH.
 */
5142 i40e_parse_rss_filter(struct rte_eth_dev *dev,
5143 const struct rte_flow_attr *attr,
5144 const struct rte_flow_item pattern[],
5145 const struct rte_flow_action actions[],
5146 union i40e_filter_t *filter,
5147 struct rte_flow_error *error)
5149 struct i40e_rss_pattern_info p_info;
5150 struct i40e_queue_regions info;
5153 memset(&info, 0, sizeof(struct i40e_queue_regions));
5154 memset(&p_info, 0, sizeof(struct i40e_rss_pattern_info));
5156 ret = i40e_flow_parse_rss_pattern(dev, pattern,
5157 error, &p_info, &info);
5161 ret = i40e_flow_parse_rss_action(dev, actions, error,
5162 p_info, &info, filter);
5166 ret = i40e_flow_parse_attr(attr, error);
5170 cons_filter_type = RTE_ETH_FILTER_HASH;
/*
 * Apply a parsed RSS configuration to hardware and track it in SW.
 * Queue-region rules are flushed to HW via the queue region path;
 * plain RSS rules go through i40e_config_rss_filter().  A tracking
 * node is then appended to pf->rss_config_list.
 */
5176 i40e_config_rss_filter_set(struct rte_eth_dev *dev,
5177 struct i40e_rte_flow_rss_conf *conf)
5179 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5180 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5181 struct i40e_rss_filter *rss_filter;
5184 if (conf->queue_region_conf) {
/* on == 1: program the queue region configuration into HW. */
5185 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 1);
5187 ret = i40e_config_rss_filter(pf, conf, 1);
5193 rss_filter = rte_zmalloc("i40e_rss_filter",
5194 sizeof(*rss_filter), 0);
5195 if (rss_filter == NULL) {
5196 PMD_DRV_LOG(ERR, "Failed to alloc memory.");
5199 rss_filter->rss_filter_info = *conf;
5200 /* the rule new created is always valid
5201 * the existing rule covered by new rule will be set invalid
5203 rss_filter->rss_filter_info.valid = true;
5205 TAILQ_INSERT_TAIL(&pf->rss_config_list, rss_filter, next);
/*
 * Remove an RSS configuration from hardware and drop its tracking
 * node from pf->rss_config_list.
 */
5211 i40e_config_rss_filter_del(struct rte_eth_dev *dev,
5212 struct i40e_rte_flow_rss_conf *conf)
5214 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5215 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5216 struct i40e_rss_filter *rss_filter;
5219 if (conf->queue_region_conf)
/* on == 0: clear the queue region configuration from HW. */
5220 i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5222 i40e_config_rss_filter(pf, conf, 0);
/* NOTE(review): the compare size is sizeof(struct rte_flow_action_rss),
 * i.e. only the leading action part of the stored filter info is
 * compared — presumably intentional to match on the user-supplied
 * action only; confirm against struct i40e_rte_flow_rss_conf layout.
 */
5224 TAILQ_FOREACH_SAFE(rss_filter, &pf->rss_config_list, next, temp) {
5225 if (!memcmp(&rss_filter->rss_filter_info, conf,
5226 sizeof(struct rte_flow_action_rss))) {
5227 TAILQ_REMOVE(&pf->rss_config_list, rss_filter, next);
5228 rte_free(rss_filter);
/*
 * rte_flow validate callback.  Rejects NULL pattern/actions/attr,
 * handles RSS rules via a dedicated parser, and otherwise strips
 * VOID items and tries each registered pattern parser in turn until
 * one accepts the rule (filling the global cons_filter on success).
 */
5235 i40e_flow_validate(struct rte_eth_dev *dev,
5236 const struct rte_flow_attr *attr,
5237 const struct rte_flow_item pattern[],
5238 const struct rte_flow_action actions[],
5239 struct rte_flow_error *error)
5241 struct rte_flow_item *items; /* internal pattern w/o VOID items */
5242 parse_filter_t parse_filter;
5243 uint32_t item_num = 0; /* non-void item number of pattern*/
5246 int ret = I40E_NOT_SUPPORTED;
5249 rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5250 NULL, "NULL pattern.");
5255 rte_flow_error_set(error, EINVAL,
5256 RTE_FLOW_ERROR_TYPE_ACTION_NUM,
5257 NULL, "NULL action.");
5262 rte_flow_error_set(error, EINVAL,
5263 RTE_FLOW_ERROR_TYPE_ATTR,
5264 NULL, "NULL attribute.");
5268 memset(&cons_filter, 0, sizeof(cons_filter));
5270 /* Get the non-void item of action */
5271 while ((actions + i)->type == RTE_FLOW_ACTION_TYPE_VOID)
/* RSS rules take a dedicated parse path and return early. */
5274 if ((actions + i)->type == RTE_FLOW_ACTION_TYPE_RSS) {
5275 ret = i40e_parse_rss_filter(dev, attr, pattern,
5276 actions, &cons_filter, error);
5281 /* Get the non-void item number of pattern */
5282 while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
5283 if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
5289 items = rte_zmalloc("i40e_pattern",
5290 item_num * sizeof(struct rte_flow_item), 0);
5292 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
5293 NULL, "No memory for PMD internal items.");
5297 i40e_pattern_skip_void_item(items, pattern);
/* Try each matching parser until one accepts, or the table runs out. */
5301 parse_filter = i40e_find_parse_filter_func(items, &i);
5302 if (!parse_filter && !flag) {
5303 rte_flow_error_set(error, EINVAL,
5304 RTE_FLOW_ERROR_TYPE_ITEM,
5305 pattern, "Unsupported pattern");
5310 ret = parse_filter(dev, attr, items, actions,
5311 error, &cons_filter);
5313 } while ((ret < 0) && (i < RTE_DIM(i40e_supported_patterns)));
static struct rte_flow *
/*
 * rte_flow create callback.  Validates the rule (which fills the
 * global cons_filter/cons_filter_type), programs the matching HW
 * filter, and links the new flow to the SW filter node just appended
 * to the corresponding per-type list (TAILQ_LAST).
 */
5321 i40e_flow_create(struct rte_eth_dev *dev,
5322 const struct rte_flow_attr *attr,
5323 const struct rte_flow_item pattern[],
5324 const struct rte_flow_action actions[],
5325 struct rte_flow_error *error)
5327 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5328 struct rte_flow *flow;
5331 flow = rte_zmalloc("i40e_flow", sizeof(struct rte_flow), 0);
5333 rte_flow_error_set(error, ENOMEM,
5334 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5335 "Failed to allocate memory");
5339 ret = i40e_flow_validate(dev, attr, pattern, actions, error);
/* Dispatch on the filter type recorded by the parse step. */
5343 switch (cons_filter_type) {
5344 case RTE_ETH_FILTER_ETHERTYPE:
5345 ret = i40e_ethertype_filter_set(pf,
5346 &cons_filter.ethertype_filter, 1);
5349 flow->rule = TAILQ_LAST(&pf->ethertype.ethertype_list,
5350 i40e_ethertype_filter_list);
5352 case RTE_ETH_FILTER_FDIR:
5353 ret = i40e_flow_add_del_fdir_filter(dev,
5354 &cons_filter.fdir_filter, 1);
5357 flow->rule = TAILQ_LAST(&pf->fdir.fdir_list,
5358 i40e_fdir_filter_list);
5360 case RTE_ETH_FILTER_TUNNEL:
5361 ret = i40e_dev_consistent_tunnel_filter_set(pf,
5362 &cons_filter.consistent_tunnel_filter, 1);
5365 flow->rule = TAILQ_LAST(&pf->tunnel.tunnel_list,
5366 i40e_tunnel_filter_list);
5368 case RTE_ETH_FILTER_HASH:
5369 ret = i40e_config_rss_filter_set(dev,
5370 &cons_filter.rss_conf);
5373 flow->rule = TAILQ_LAST(&pf->rss_config_list,
5374 i40e_rss_conf_list);
5380 flow->filter_type = cons_filter_type;
5381 TAILQ_INSERT_TAIL(&pf->flow_list, flow, node);
5385 rte_flow_error_set(error, -ret,
5386 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5387 "Failed to create flow.");
/*
 * rte_flow destroy callback: remove the HW filter behind this flow
 * handle according to its filter type, then unlink it from the PF
 * flow list.
 */
5393 i40e_flow_destroy(struct rte_eth_dev *dev,
5394 struct rte_flow *flow,
5395 struct rte_flow_error *error)
5397 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5398 enum rte_filter_type filter_type = flow->filter_type;
5401 switch (filter_type) {
5402 case RTE_ETH_FILTER_ETHERTYPE:
5403 ret = i40e_flow_destroy_ethertype_filter(pf,
5404 (struct i40e_ethertype_filter *)flow->rule);
5406 case RTE_ETH_FILTER_TUNNEL:
5407 ret = i40e_flow_destroy_tunnel_filter(pf,
5408 (struct i40e_tunnel_filter *)flow->rule);
5410 case RTE_ETH_FILTER_FDIR:
5411 ret = i40e_flow_add_del_fdir_filter(dev,
5412 &((struct i40e_fdir_filter *)flow->rule)->fdir, 0);
5414 /* If the last flow is destroyed, disable fdir. */
5415 if (!ret && TAILQ_EMPTY(&pf->fdir.fdir_list)) {
5416 i40e_fdir_rx_proc_enable(dev, 0);
5419 case RTE_ETH_FILTER_HASH:
5420 ret = i40e_config_rss_filter_del(dev,
5421 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5424 PMD_DRV_LOG(WARNING, "Filter type (%d) not supported",
5431 TAILQ_REMOVE(&pf->flow_list, flow, node);
5434 rte_flow_error_set(error, -ret,
5435 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5436 "Failed to destroy flow.");
/*
 * Remove one ethertype filter: rebuild the admin-queue control-packet
 * flags from the stored filter, ask firmware to remove the rule, then
 * look up and delete the matching SW list node.
 */
5442 i40e_flow_destroy_ethertype_filter(struct i40e_pf *pf,
5443 struct i40e_ethertype_filter *filter)
5445 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5446 struct i40e_ethertype_rule *ethertype_rule = &pf->ethertype;
5447 struct i40e_ethertype_filter *node;
5448 struct i40e_control_filter_stats stats;
/* Translate RTE ethertype flags back into AQ control-packet flags. */
5452 if (!(filter->flags & RTE_ETHTYPE_FLAGS_MAC))
5453 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_IGNORE_MAC;
5454 if (filter->flags & RTE_ETHTYPE_FLAGS_DROP)
5455 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_DROP;
5456 flags |= I40E_AQC_ADD_CONTROL_PACKET_FLAGS_TO_QUEUE;
5458 memset(&stats, 0, sizeof(stats));
/* add_rem with is_add == 0 removes the filter in firmware. */
5459 ret = i40e_aq_add_rem_control_packet_filter(hw,
5460 filter->input.mac_addr.addr_bytes,
5461 filter->input.ether_type,
5462 flags, pf->main_vsi->seid,
5463 filter->queue, 0, &stats, NULL);
5467 node = i40e_sw_ethertype_filter_lookup(ethertype_rule, &filter->input);
5471 ret = i40e_sw_ethertype_filter_del(pf, &node->input);
/*
 * Remove one cloud/tunnel filter: rebuild the AQ cloud-filter element
 * from the stored filter, pick the owning VSI (PF main VSI or a VF
 * VSI), remove via the big-buffer or regular AQ command depending on
 * the filter flags, then delete the matching SW list node.
 */
5477 i40e_flow_destroy_tunnel_filter(struct i40e_pf *pf,
5478 struct i40e_tunnel_filter *filter)
5480 struct i40e_hw *hw = I40E_PF_TO_HW(pf);
5481 struct i40e_vsi *vsi;
5482 struct i40e_pf_vf *vf;
5483 struct i40e_aqc_cloud_filters_element_bb cld_filter;
5484 struct i40e_tunnel_rule *tunnel_rule = &pf->tunnel;
5485 struct i40e_tunnel_filter *node;
5486 bool big_buffer = 0;
5489 memset(&cld_filter, 0, sizeof(cld_filter));
5490 rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.outer_mac,
5491 (struct rte_ether_addr *)&cld_filter.element.outer_mac);
5492 rte_ether_addr_copy((struct rte_ether_addr *)&filter->input.inner_mac,
5493 (struct rte_ether_addr *)&cld_filter.element.inner_mac);
5494 cld_filter.element.inner_vlan = filter->input.inner_vlan;
5495 cld_filter.element.flags = filter->input.flags;
5496 cld_filter.element.tenant_id = filter->input.tenant_id;
5497 cld_filter.element.queue_number = filter->queue;
5498 rte_memcpy(cld_filter.general_fields,
5499 filter->input.general_fields,
5500 sizeof(cld_filter.general_fields));
/* Target VSI: PF main VSI, unless the filter was steered to a VF. */
5502 if (!filter->is_to_vf)
5505 vf = &pf->vfs[filter->vf_id];
/* Filter types 0x10/0x11/0x12 require the big-buffer AQ variant. */
5509 if (((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X11) ==
5510 I40E_AQC_ADD_CLOUD_FILTER_0X11) ||
5511 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X12) ==
5512 I40E_AQC_ADD_CLOUD_FILTER_0X12) ||
5513 ((filter->input.flags & I40E_AQC_ADD_CLOUD_FILTER_0X10) ==
5514 I40E_AQC_ADD_CLOUD_FILTER_0X10))
5518 ret = i40e_aq_rem_cloud_filters_bb(hw, vsi->seid,
5521 ret = i40e_aq_rem_cloud_filters(hw, vsi->seid,
5522 &cld_filter.element, 1);
5526 node = i40e_sw_tunnel_filter_lookup(tunnel_rule, &filter->input);
5530 ret = i40e_sw_tunnel_filter_del(pf, &node->input);
/*
 * rte_flow flush callback: remove every flow of every supported type
 * in a fixed order (FDIR, ethertype, tunnel, RSS), stopping at the
 * first failure with a type-specific error message.
 */
5536 i40e_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
5538 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5541 ret = i40e_flow_flush_fdir_filter(pf);
5543 rte_flow_error_set(error, -ret,
5544 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5545 "Failed to flush FDIR flows.");
5549 ret = i40e_flow_flush_ethertype_filter(pf);
5551 rte_flow_error_set(error, -ret,
5552 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5553 "Failed to ethertype flush flows.");
5557 ret = i40e_flow_flush_tunnel_filter(pf);
5559 rte_flow_error_set(error, -ret,
5560 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5561 "Failed to flush tunnel flows.");
5565 ret = i40e_flow_flush_rss_filter(dev);
5567 rte_flow_error_set(error, -ret,
5568 RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
5569 "Failed to flush RSS flows.");
/*
 * Flush all flow-director rules: clear HW, drain the SW FDIR list and
 * the FDIR entries of the PF flow list, reset the FDIR space counters
 * and per-pctype input-set flags, and finally disable FDIR RX
 * processing since no rules remain.
 */
5577 i40e_flow_flush_fdir_filter(struct i40e_pf *pf)
5579 struct rte_eth_dev *dev = pf->adapter->eth_dev;
5580 struct i40e_fdir_info *fdir_info = &pf->fdir;
5581 struct i40e_fdir_filter *fdir_filter;
5582 enum i40e_filter_pctype pctype;
5583 struct rte_flow *flow;
5587 ret = i40e_fdir_flush(dev);
5589 /* Delete FDIR filters in FDIR list. */
5590 while ((fdir_filter = TAILQ_FIRST(&fdir_info->fdir_list))) {
5591 ret = i40e_sw_fdir_filter_del(pf,
5592 &fdir_filter->fdir.input);
5597 /* Delete FDIR flows in flow list. */
5598 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5599 if (flow->filter_type == RTE_ETH_FILTER_FDIR) {
5600 TAILQ_REMOVE(&pf->flow_list, flow, node);
/* Reset accounting: all guaranteed filter space is free again. */
5605 fdir_info->fdir_actual_cnt = 0;
5606 fdir_info->fdir_guarantee_free_space =
5607 fdir_info->fdir_guarantee_total_space;
5609 for (pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
5610 pctype <= I40E_FILTER_PCTYPE_L2_PAYLOAD; pctype++)
5611 pf->fdir.inset_flag[pctype] = 0;
5613 /* Disable FDIR processing as all FDIR rules are now flushed */
5614 i40e_fdir_rx_proc_enable(dev, 0);
/* Flush all ethertype filters */
/*
 * Destroy every ethertype filter in the SW list, then remove the
 * corresponding entries from the PF flow list.
 */
5622 i40e_flow_flush_ethertype_filter(struct i40e_pf *pf)
5624 struct i40e_ethertype_filter_list
5625 *ethertype_list = &pf->ethertype.ethertype_list;
5626 struct i40e_ethertype_filter *filter;
5627 struct rte_flow *flow;
5631 while ((filter = TAILQ_FIRST(ethertype_list))) {
5632 ret = i40e_flow_destroy_ethertype_filter(pf, filter);
5637 /* Delete ethertype flows in flow list. */
5638 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5639 if (flow->filter_type == RTE_ETH_FILTER_ETHERTYPE) {
5640 TAILQ_REMOVE(&pf->flow_list, flow, node);
/* Flush all tunnel filters */
/*
 * Destroy every tunnel filter in the SW list, then remove the
 * corresponding entries from the PF flow list.
 */
5650 i40e_flow_flush_tunnel_filter(struct i40e_pf *pf)
5652 struct i40e_tunnel_filter_list
5653 *tunnel_list = &pf->tunnel.tunnel_list;
5654 struct i40e_tunnel_filter *filter;
5655 struct rte_flow *flow;
5659 while ((filter = TAILQ_FIRST(tunnel_list))) {
5660 ret = i40e_flow_destroy_tunnel_filter(pf, filter);
5665 /* Delete tunnel flows in flow list. */
5666 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5667 if (flow->filter_type == RTE_ETH_FILTER_TUNNEL) {
5668 TAILQ_REMOVE(&pf->flow_list, flow, node);
/* remove the RSS filter */
/*
 * Flush all RSS flows: clear the queue region configuration in HW,
 * then delete every RSS-type flow from the PF flow list along with
 * its RSS filter configuration.
 */
5678 i40e_flow_flush_rss_filter(struct rte_eth_dev *dev)
5680 struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private);
5681 struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private);
5682 struct rte_flow *flow;
5684 int32_t ret = -EINVAL;
/* on == 0: clear all queue region configuration from HW. */
5686 ret = i40e_flush_queue_region_all_conf(dev, hw, pf, 0);
5688 /* Delete RSS flows in flow list. */
5689 TAILQ_FOREACH_SAFE(flow, &pf->flow_list, node, temp) {
5690 if (flow->filter_type != RTE_ETH_FILTER_HASH)
5694 ret = i40e_config_rss_filter_del(dev,
5695 &((struct i40e_rss_filter *)flow->rule)->rss_filter_info);
5699 TAILQ_REMOVE(&pf->flow_list, flow, node);
5707 i40e_flow_query(struct rte_eth_dev *dev __rte_unused,
5708 struct rte_flow *flow,
5709 const struct rte_flow_action *actions,
5710 void *data, struct rte_flow_error *error)
5712 struct i40e_rss_filter *rss_rule = (struct i40e_rss_filter *)flow->rule;
5713 enum rte_filter_type filter_type = flow->filter_type;
5714 struct rte_flow_action_rss *rss_conf = data;
5717 rte_flow_error_set(error, EINVAL,
5718 RTE_FLOW_ERROR_TYPE_HANDLE,
5719 NULL, "Invalid rule");
5723 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5724 switch (actions->type) {
5725 case RTE_FLOW_ACTION_TYPE_VOID:
5727 case RTE_FLOW_ACTION_TYPE_RSS:
5728 if (filter_type != RTE_ETH_FILTER_HASH) {
5729 rte_flow_error_set(error, ENOTSUP,
5730 RTE_FLOW_ERROR_TYPE_ACTION,
5732 "action not supported");
5735 rte_memcpy(rss_conf,
5736 &rss_rule->rss_filter_info.conf,
5737 sizeof(struct rte_flow_action_rss));
5740 return rte_flow_error_set(error, ENOTSUP,
5741 RTE_FLOW_ERROR_TYPE_ACTION,
5743 "action not supported");