/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2019 Intel Corporation
 */
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_tailq.h>

#include "iavf.h"
#include "iavf_generic_flow.h"

static struct iavf_engine_list engine_list =
		TAILQ_HEAD_INITIALIZER(engine_list);

static int iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);
static int iavf_flow_destroy(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		struct rte_flow_error *error);
static int iavf_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error);
static int iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error);

const struct rte_flow_ops iavf_flow_ops = {
	.validate = iavf_flow_validate,
	.create = iavf_flow_create,
	.destroy = iavf_flow_destroy,
	.flush = iavf_flow_flush,
	.query = iavf_flow_query,
};

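/*
 * Illustrative sketch, not part of the upstream driver: applications reach
 * the callbacks above through the generic rte_flow API (rte_flow_validate()
 * and rte_flow_create()), which the ethdev layer routes to iavf_flow_ops.
 * The port_id, queue index and the IAVF_FLOW_USAGE_EXAMPLE guard macro are
 * assumptions for this example only.
 */
#ifdef IAVF_FLOW_USAGE_EXAMPLE
static int
example_create_ipv4_udp_rule(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	/* Matches iavf_pattern_eth_ipv4_udp below; empty spec/mask = any. */
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 1 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err))
		return -1;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	return flow == NULL ? -1 : 0;
}
#endif /* IAVF_FLOW_USAGE_EXAMPLE */
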
/* empty */
enum rte_flow_item_type iavf_pattern_empty[] = {
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2 */
enum rte_flow_item_type iavf_pattern_ethertype[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ARP */
enum rte_flow_item_type iavf_pattern_eth_arp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv4 */
enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* non-tunnel IPv6 */
enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_SCTP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_VLAN,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ICMP6,
	RTE_FLOW_ITEM_TYPE_END,
};

/* GTPU */
enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_TCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_GTPU,
	RTE_FLOW_ITEM_TYPE_GTP_PSC,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ICMP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* ESP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv4_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_udp_esp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_ESP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* AH */
enum rte_flow_item_type iavf_pattern_eth_ipv4_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_ah[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_AH,
	RTE_FLOW_ITEM_TYPE_END,
};

/* L2TPV3 */
enum rte_flow_item_type iavf_pattern_eth_ipv4_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_l2tpv3[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_L2TPV3OIP,
	RTE_FLOW_ITEM_TYPE_END,
};

/* PFCP */
enum rte_flow_item_type iavf_pattern_eth_ipv4_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV4,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

enum rte_flow_item_type iavf_pattern_eth_ipv6_pfcp[] = {
	RTE_FLOW_ITEM_TYPE_ETH,
	RTE_FLOW_ITEM_TYPE_IPV6,
	RTE_FLOW_ITEM_TYPE_UDP,
	RTE_FLOW_ITEM_TYPE_PFCP,
	RTE_FLOW_ITEM_TYPE_END,
};

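/*
 * Illustrative sketch, not part of the upstream driver: each parser pairs
 * the pattern arrays above with an input-set mask and optional meta data in
 * an iavf_pattern_match_item table, which iavf_search_pattern_match_item()
 * walks further below. The zero mask, NULL meta and the
 * IAVF_FLOW_USAGE_EXAMPLE guard macro are placeholders for the example only.
 */
#ifdef IAVF_FLOW_USAGE_EXAMPLE
static struct iavf_pattern_match_item example_pattern_table[] = {
	{ .pattern_list = iavf_pattern_eth_ipv4,
	  .input_set_mask = 0,
	  .meta = NULL },
	{ .pattern_list = iavf_pattern_eth_ipv4_udp,
	  .input_set_mask = 0,
	  .meta = NULL },
};
#endif /* IAVF_FLOW_USAGE_EXAMPLE */
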
typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error);

void
iavf_register_flow_engine(struct iavf_flow_engine *engine)
{
	TAILQ_INSERT_TAIL(&engine_list, engine, node);
}

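/*
 * Illustrative sketch, not part of the upstream driver: a flow engine (such
 * as the RSS hash or FDIR module) fills a struct iavf_flow_engine with its
 * callbacks and registers it from an RTE_INIT() constructor so it sits on
 * engine_list before iavf_flow_init() runs. The example_* names and the
 * IAVF_FLOW_USAGE_EXAMPLE guard macro are hypothetical; the init callback
 * signature is inferred from its use in iavf_flow_init() below.
 */
#ifdef IAVF_FLOW_USAGE_EXAMPLE
static int
example_engine_init(struct iavf_adapter *ad __rte_unused)
{
	return 0;
}

static struct iavf_flow_engine example_engine = {
	.init = example_engine_init,
	.type = IAVF_FLOW_ENGINE_FDIR,
};

RTE_INIT(example_engine_register)
{
	iavf_register_flow_engine(&example_engine);
}
#endif /* IAVF_FLOW_USAGE_EXAMPLE */
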
int
iavf_flow_init(struct iavf_adapter *ad)
{
	int ret;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	void *temp;
	struct iavf_flow_engine *engine;

	TAILQ_INIT(&vf->flow_list);
	TAILQ_INIT(&vf->rss_parser_list);
	TAILQ_INIT(&vf->dist_parser_list);
	rte_spinlock_init(&vf->flow_ops_lock);

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->init == NULL) {
			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
				     engine->type);
			return -ENOTSUP;
		}
		ret = engine->init(ad);
		if (ret && ret != -ENOTSUP) {
			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
				     engine->type);
			return ret;
		}
	}
	return 0;
}

void
iavf_flow_uninit(struct iavf_adapter *ad)
{
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_engine *engine;
	struct rte_flow *p_flow;
	struct iavf_flow_parser_node *p_parser;
	void *temp;

	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
		if (engine->uninit)
			engine->uninit(ad);
	}

	/* Remove all flows */
	while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
		TAILQ_REMOVE(&vf->flow_list, p_flow, node);
		if (p_flow->engine->free)
			p_flow->engine->free(p_flow);
		rte_free(p_flow);
	}

	/* Cleanup parser list */
	while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
		TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
		rte_free(p_parser);
	}

	while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
		TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
		rte_free(p_parser);
	}
}

int
iavf_register_parser(struct iavf_flow_parser *parser,
		     struct iavf_adapter *ad)
{
	struct iavf_parser_list *list = NULL;
	struct iavf_flow_parser_node *parser_node;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
	if (parser_node == NULL) {
		PMD_DRV_LOG(ERR, "Failed to allocate memory.");
		return -ENOMEM;
	}
	parser_node->parser = parser;

	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
		list = &vf->rss_parser_list;
		TAILQ_INSERT_TAIL(list, parser_node, node);
	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
		list = &vf->dist_parser_list;
		TAILQ_INSERT_HEAD(list, parser_node, node);
	} else {
		rte_free(parser_node);
		return -EINVAL;
	}

	return 0;
}

void
iavf_unregister_parser(struct iavf_flow_parser *parser,
		       struct iavf_adapter *ad)
{
	struct iavf_parser_list *list = NULL;
	struct iavf_flow_parser_node *p_parser;
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	void *temp;

	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
		list = &vf->rss_parser_list;
	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
		list = &vf->dist_parser_list;

	if (list == NULL)
		return;

	TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
		if (p_parser->parser->engine->type == parser->engine->type) {
			TAILQ_REMOVE(list, p_parser, node);
			rte_free(p_parser);
			break;
		}
	}
}

static int
iavf_flow_valid_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				attr, "Not support egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				attr, "Not support priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr, "Not support group.");
		return -rte_errno;
	}

	return 0;
}

/* Find the first VOID or non-VOID item pointer */
static const struct rte_flow_item *
iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
{
	bool is_find;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (is_void)
			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
		else
			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
		if (is_find)
			break;
		item++;
	}
	return item;
}

/* Skip all VOID items of the pattern */
static void
iavf_pattern_skip_void_item(struct rte_flow_item *items,
			const struct rte_flow_item *pattern)
{
	uint32_t cpy_count = 0;
	const struct rte_flow_item *pb = pattern, *pe = pattern;

	for (;;) {
		/* Find a non-void item first */
		pb = iavf_find_first_item(pb, false);
		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
			pe = pb;
			break;
		}

		/* Find a void item */
		pe = iavf_find_first_item(pb + 1, true);

		cpy_count = pe - pb;
		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
		items += cpy_count;

		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
			break;

		pb = pe + 1;
	}
	/* Copy the END item. */
	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
}

/* Check if the pattern matches a supported item type array */
static bool
iavf_match_pattern(enum rte_flow_item_type *item_array,
		   const struct rte_flow_item *pattern)
{
	const struct rte_flow_item *item = pattern;

	while ((*item_array == item->type) &&
	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
		item_array++;
		item++;
	}

	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
		item->type == RTE_FLOW_ITEM_TYPE_END);
}

struct iavf_pattern_match_item *
iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
		struct iavf_pattern_match_item *array,
		uint32_t array_len,
		struct rte_flow_error *error)
{
	uint16_t i = 0;
	/* the returned item must be freed by each filter */
	struct iavf_pattern_match_item *pattern_match_item;
	struct rte_flow_item *items; /* used for pattern without VOID items */
	uint32_t item_num = 0; /* non-void item number */

	/* Get the non-void item number of pattern */
	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
			item_num++;
		i++;
	}
	item_num++;	/* reserve one slot for the END item */

	items = rte_zmalloc("iavf_pattern",
			    item_num * sizeof(struct rte_flow_item), 0);
	if (!items) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "No memory for PMD internal items.");
		return NULL;
	}

	pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
				sizeof(struct iavf_pattern_match_item), 0);
	if (!pattern_match_item) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Failed to allocate memory.");
		rte_free(items);
		return NULL;
	}

	iavf_pattern_skip_void_item(items, pattern);

	for (i = 0; i < array_len; i++)
		if (iavf_match_pattern(array[i].pattern_list,
				       items)) {
			pattern_match_item->input_set_mask =
				array[i].input_set_mask;
			pattern_match_item->pattern_list =
				array[i].pattern_list;
			pattern_match_item->meta = array[i].meta;
			rte_free(items);
			return pattern_match_item;
		}

	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
			   pattern, "Unsupported pattern");

	rte_free(items);
	rte_free(pattern_match_item);
	return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_create(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta, error) < 0)
			continue;

		engine = parser_node->parser->engine;

		RTE_ASSERT(engine->create != NULL);
		if (!(engine->create(ad, flow, meta, error)))
			return engine;
	}
	return NULL;
}

static struct iavf_flow_engine *
iavf_parse_engine_validate(struct iavf_adapter *ad,
		struct rte_flow *flow,
		struct iavf_parser_list *parser_list,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine = NULL;
	struct iavf_flow_parser_node *parser_node;
	void *temp;
	void *meta = NULL;

	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		if (parser_node->parser->parse_pattern_action(ad,
				parser_node->parser->array,
				parser_node->parser->array_len,
				pattern, actions, &meta, error) < 0)
			continue;

		engine = parser_node->parser->engine;
		if (engine->validation == NULL) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Validation not support");
			continue;
		}

		if (engine->validation(ad, flow, meta, error)) {
			rte_flow_error_set(error, EINVAL,
					RTE_FLOW_ERROR_TYPE_HANDLE,
					NULL, "Validation failed");
			break;
		}
	}
	return engine;
}

static int
iavf_flow_process_filter(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct iavf_flow_engine **engine,
		parse_engine_t iavf_parse_engine,
		struct rte_flow_error *error)
{
	int ret = IAVF_ERR_CONFIG;
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);

	if (!pattern) {
		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	ret = iavf_flow_valid_attr(attr, error);
	if (ret)
		return ret;

	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
				    actions, error);
	if (*engine != NULL)
		return 0;

	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
				    actions, error);
	if (*engine == NULL)
		return -EINVAL;

	return 0;
}

static int
iavf_flow_validate(struct rte_eth_dev *dev,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct iavf_flow_engine *engine;

	return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
			&engine, iavf_parse_engine_validate, error);
}

static struct rte_flow *
iavf_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct iavf_flow_engine *engine = NULL;
	struct rte_flow *flow = NULL;
	int ret;

	flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	rte_spinlock_lock(&vf->flow_ops_lock);

	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
			&engine, iavf_parse_engine_create, error);
	if (ret < 0) {
		PMD_DRV_LOG(ERR, "Failed to create flow");
		rte_free(flow);
		flow = NULL;
		goto free_flow;
	}

	flow->engine = engine;
	TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);

free_flow:
	rte_spinlock_unlock(&vf->flow_ops_lock);
	return flow;
}

static int
iavf_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	int ret = 0;

	if (!flow || !flow->engine || !flow->engine->destroy) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Invalid flow");
		return -rte_errno;
	}

	rte_spinlock_lock(&vf->flow_ops_lock);

	ret = flow->engine->destroy(ad, flow, error);
	if (!ret) {
		TAILQ_REMOVE(&vf->flow_list, flow, node);
		rte_free(flow);
	} else {
		PMD_DRV_LOG(ERR, "Failed to destroy flow");
	}

	rte_spinlock_unlock(&vf->flow_ops_lock);

	return ret;
}

static int
iavf_flow_flush(struct rte_eth_dev *dev,
		struct rte_flow_error *error)
{
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	struct rte_flow *p_flow;
	void *temp;
	int ret = 0;

	TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
		ret = iavf_flow_destroy(dev, p_flow, error);
		if (ret) {
			PMD_DRV_LOG(ERR, "Failed to flush flows");
			return -EINVAL;
		}
	}

	return ret;
}

static int
iavf_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = -EINVAL;
	struct iavf_adapter *ad =
		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
	struct rte_flow_query_count *count = data;

	if (!flow || !flow->engine || !flow->engine->query_count) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "Invalid flow");
		return -rte_errno;
	}

	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow->engine->query_count(ad, flow, count, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"action not supported");
		}
	}

	return ret;
}
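
/*
 * Illustrative sketch, not part of the upstream driver: an application reads
 * the counters kept for a flow through rte_flow_query() with a COUNT action,
 * which the ethdev layer routes to iavf_flow_query() above. The example_*
 * name and the IAVF_FLOW_USAGE_EXAMPLE guard macro are hypothetical; port_id
 * and flow are assumed to come from an earlier rte_flow_create() call.
 */
#ifdef IAVF_FLOW_USAGE_EXAMPLE
static int
example_query_count(uint16_t port_id, struct rte_flow *flow, uint64_t *hits)
{
	struct rte_flow_query_count count = { .reset = 1 };
	struct rte_flow_action action[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_query(port_id, flow, action, &count, &err))
		return -1;

	*hits = count.hits_set ? count.hits : 0;
	return 0;
}
#endif /* IAVF_FLOW_USAGE_EXAMPLE */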