1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2019 Intel Corporation
13 #include <rte_ether.h>
14 #include <rte_ethdev_driver.h>
15 #include <rte_malloc.h>
16 #include <rte_tailq.h>
19 #include "iavf_generic_flow.h"
/* Global list of all registered flow engines (hash/RSS, FDIR, ...).
 * Engines add themselves via iavf_register_flow_engine(); iavf_flow_init()
 * walks this list to initialize each engine per adapter.
 */
21 static struct iavf_engine_list engine_list =
22 		TAILQ_HEAD_INITIALIZER(engine_list);
/* Forward declarations of the rte_flow_ops callbacks implemented below;
 * needed because iavf_flow_ops is defined before the functions themselves.
 */
24 static int iavf_flow_validate(struct rte_eth_dev *dev,
25 		const struct rte_flow_attr *attr,
26 		const struct rte_flow_item pattern[],
27 		const struct rte_flow_action actions[],
28 		struct rte_flow_error *error);
29 static struct rte_flow *iavf_flow_create(struct rte_eth_dev *dev,
30 		const struct rte_flow_attr *attr,
31 		const struct rte_flow_item pattern[],
32 		const struct rte_flow_action actions[],
33 		struct rte_flow_error *error);
34 static int iavf_flow_destroy(struct rte_eth_dev *dev,
35 		struct rte_flow *flow,
36 		struct rte_flow_error *error);
37 static int iavf_flow_flush(struct rte_eth_dev *dev,
38 		struct rte_flow_error *error);
39 static int iavf_flow_query(struct rte_eth_dev *dev,
40 		struct rte_flow *flow,
41 		const struct rte_flow_action *actions,
43 		struct rte_flow_error *error);
/* rte_flow ops vtable exported by this driver; returned to the ethdev
 * layer so applications can drive the generic rte_flow API on an iavf port.
 */
45 const struct rte_flow_ops iavf_flow_ops = {
46 	.validate = iavf_flow_validate,
47 	.create = iavf_flow_create,
48 	.destroy = iavf_flow_destroy,
49 	.flush = iavf_flow_flush,
50 	.query = iavf_flow_query,
/* Pattern templates: each array is an ordered list of rte_flow item types,
 * terminated by RTE_FLOW_ITEM_TYPE_END. Parsers compare an application's
 * (VOID-stripped) pattern against these via iavf_match_pattern().
 */
/* L2-only patterns: empty, ethertype, single VLAN, QinQ (double VLAN). */
54 enum rte_flow_item_type iavf_pattern_empty[] = {
55 	RTE_FLOW_ITEM_TYPE_END,
59 enum rte_flow_item_type iavf_pattern_ethertype[] = {
60 	RTE_FLOW_ITEM_TYPE_ETH,
61 	RTE_FLOW_ITEM_TYPE_END,
64 enum rte_flow_item_type iavf_pattern_ethertype_vlan[] = {
65 	RTE_FLOW_ITEM_TYPE_ETH,
66 	RTE_FLOW_ITEM_TYPE_VLAN,
67 	RTE_FLOW_ITEM_TYPE_END,
70 enum rte_flow_item_type iavf_pattern_ethertype_qinq[] = {
71 	RTE_FLOW_ITEM_TYPE_ETH,
72 	RTE_FLOW_ITEM_TYPE_VLAN,
73 	RTE_FLOW_ITEM_TYPE_VLAN,
74 	RTE_FLOW_ITEM_TYPE_END,
/* ARP-over-Ethernet pattern (IPv4 ARP payload). */
78 enum rte_flow_item_type iavf_pattern_eth_arp[] = {
79 	RTE_FLOW_ITEM_TYPE_ETH,
80 	RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4,
81 	RTE_FLOW_ITEM_TYPE_END,
/* Non-tunnel IPv4 patterns. For each L4 (none/UDP/TCP/SCTP/ICMP) there are
 * three variants: plain ETH, ETH + VLAN, and ETH + QinQ (double VLAN).
 */
85 enum rte_flow_item_type iavf_pattern_eth_ipv4[] = {
86 	RTE_FLOW_ITEM_TYPE_ETH,
87 	RTE_FLOW_ITEM_TYPE_IPV4,
88 	RTE_FLOW_ITEM_TYPE_END,
91 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4[] = {
92 	RTE_FLOW_ITEM_TYPE_ETH,
93 	RTE_FLOW_ITEM_TYPE_VLAN,
94 	RTE_FLOW_ITEM_TYPE_IPV4,
95 	RTE_FLOW_ITEM_TYPE_END,
98 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4[] = {
99 	RTE_FLOW_ITEM_TYPE_ETH,
100 	RTE_FLOW_ITEM_TYPE_VLAN,
101 	RTE_FLOW_ITEM_TYPE_VLAN,
102 	RTE_FLOW_ITEM_TYPE_IPV4,
103 	RTE_FLOW_ITEM_TYPE_END,
106 enum rte_flow_item_type iavf_pattern_eth_ipv4_udp[] = {
107 	RTE_FLOW_ITEM_TYPE_ETH,
108 	RTE_FLOW_ITEM_TYPE_IPV4,
109 	RTE_FLOW_ITEM_TYPE_UDP,
110 	RTE_FLOW_ITEM_TYPE_END,
113 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_udp[] = {
114 	RTE_FLOW_ITEM_TYPE_ETH,
115 	RTE_FLOW_ITEM_TYPE_VLAN,
116 	RTE_FLOW_ITEM_TYPE_IPV4,
117 	RTE_FLOW_ITEM_TYPE_UDP,
118 	RTE_FLOW_ITEM_TYPE_END,
121 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_udp[] = {
122 	RTE_FLOW_ITEM_TYPE_ETH,
123 	RTE_FLOW_ITEM_TYPE_VLAN,
124 	RTE_FLOW_ITEM_TYPE_VLAN,
125 	RTE_FLOW_ITEM_TYPE_IPV4,
126 	RTE_FLOW_ITEM_TYPE_UDP,
127 	RTE_FLOW_ITEM_TYPE_END,
130 enum rte_flow_item_type iavf_pattern_eth_ipv4_tcp[] = {
131 	RTE_FLOW_ITEM_TYPE_ETH,
132 	RTE_FLOW_ITEM_TYPE_IPV4,
133 	RTE_FLOW_ITEM_TYPE_TCP,
134 	RTE_FLOW_ITEM_TYPE_END,
137 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_tcp[] = {
138 	RTE_FLOW_ITEM_TYPE_ETH,
139 	RTE_FLOW_ITEM_TYPE_VLAN,
140 	RTE_FLOW_ITEM_TYPE_IPV4,
141 	RTE_FLOW_ITEM_TYPE_TCP,
142 	RTE_FLOW_ITEM_TYPE_END,
145 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_tcp[] = {
146 	RTE_FLOW_ITEM_TYPE_ETH,
147 	RTE_FLOW_ITEM_TYPE_VLAN,
148 	RTE_FLOW_ITEM_TYPE_VLAN,
149 	RTE_FLOW_ITEM_TYPE_IPV4,
150 	RTE_FLOW_ITEM_TYPE_TCP,
151 	RTE_FLOW_ITEM_TYPE_END,
154 enum rte_flow_item_type iavf_pattern_eth_ipv4_sctp[] = {
155 	RTE_FLOW_ITEM_TYPE_ETH,
156 	RTE_FLOW_ITEM_TYPE_IPV4,
157 	RTE_FLOW_ITEM_TYPE_SCTP,
158 	RTE_FLOW_ITEM_TYPE_END,
161 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_sctp[] = {
162 	RTE_FLOW_ITEM_TYPE_ETH,
163 	RTE_FLOW_ITEM_TYPE_VLAN,
164 	RTE_FLOW_ITEM_TYPE_IPV4,
165 	RTE_FLOW_ITEM_TYPE_SCTP,
166 	RTE_FLOW_ITEM_TYPE_END,
169 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_sctp[] = {
170 	RTE_FLOW_ITEM_TYPE_ETH,
171 	RTE_FLOW_ITEM_TYPE_VLAN,
172 	RTE_FLOW_ITEM_TYPE_VLAN,
173 	RTE_FLOW_ITEM_TYPE_IPV4,
174 	RTE_FLOW_ITEM_TYPE_SCTP,
175 	RTE_FLOW_ITEM_TYPE_END,
178 enum rte_flow_item_type iavf_pattern_eth_ipv4_icmp[] = {
179 	RTE_FLOW_ITEM_TYPE_ETH,
180 	RTE_FLOW_ITEM_TYPE_IPV4,
181 	RTE_FLOW_ITEM_TYPE_ICMP,
182 	RTE_FLOW_ITEM_TYPE_END,
185 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv4_icmp[] = {
186 	RTE_FLOW_ITEM_TYPE_ETH,
187 	RTE_FLOW_ITEM_TYPE_VLAN,
188 	RTE_FLOW_ITEM_TYPE_IPV4,
189 	RTE_FLOW_ITEM_TYPE_ICMP,
190 	RTE_FLOW_ITEM_TYPE_END,
193 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv4_icmp[] = {
194 	RTE_FLOW_ITEM_TYPE_ETH,
195 	RTE_FLOW_ITEM_TYPE_VLAN,
196 	RTE_FLOW_ITEM_TYPE_VLAN,
197 	RTE_FLOW_ITEM_TYPE_IPV4,
198 	RTE_FLOW_ITEM_TYPE_ICMP,
199 	RTE_FLOW_ITEM_TYPE_END,
202 /* non-tunnel IPv6 */
/* Mirrors the IPv4 table above: for each L4 (none/UDP/TCP/SCTP/ICMP6)
 * there are plain-ETH, ETH+VLAN and ETH+QinQ variants.
 */
203 enum rte_flow_item_type iavf_pattern_eth_ipv6[] = {
204 	RTE_FLOW_ITEM_TYPE_ETH,
205 	RTE_FLOW_ITEM_TYPE_IPV6,
206 	RTE_FLOW_ITEM_TYPE_END,
209 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6[] = {
210 	RTE_FLOW_ITEM_TYPE_ETH,
211 	RTE_FLOW_ITEM_TYPE_VLAN,
212 	RTE_FLOW_ITEM_TYPE_IPV6,
213 	RTE_FLOW_ITEM_TYPE_END,
216 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6[] = {
217 	RTE_FLOW_ITEM_TYPE_ETH,
218 	RTE_FLOW_ITEM_TYPE_VLAN,
219 	RTE_FLOW_ITEM_TYPE_VLAN,
220 	RTE_FLOW_ITEM_TYPE_IPV6,
221 	RTE_FLOW_ITEM_TYPE_END,
224 enum rte_flow_item_type iavf_pattern_eth_ipv6_udp[] = {
225 	RTE_FLOW_ITEM_TYPE_ETH,
226 	RTE_FLOW_ITEM_TYPE_IPV6,
227 	RTE_FLOW_ITEM_TYPE_UDP,
228 	RTE_FLOW_ITEM_TYPE_END,
231 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_udp[] = {
232 	RTE_FLOW_ITEM_TYPE_ETH,
233 	RTE_FLOW_ITEM_TYPE_VLAN,
234 	RTE_FLOW_ITEM_TYPE_IPV6,
235 	RTE_FLOW_ITEM_TYPE_UDP,
236 	RTE_FLOW_ITEM_TYPE_END,
239 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_udp[] = {
240 	RTE_FLOW_ITEM_TYPE_ETH,
241 	RTE_FLOW_ITEM_TYPE_VLAN,
242 	RTE_FLOW_ITEM_TYPE_VLAN,
243 	RTE_FLOW_ITEM_TYPE_IPV6,
244 	RTE_FLOW_ITEM_TYPE_UDP,
245 	RTE_FLOW_ITEM_TYPE_END,
248 enum rte_flow_item_type iavf_pattern_eth_ipv6_tcp[] = {
249 	RTE_FLOW_ITEM_TYPE_ETH,
250 	RTE_FLOW_ITEM_TYPE_IPV6,
251 	RTE_FLOW_ITEM_TYPE_TCP,
252 	RTE_FLOW_ITEM_TYPE_END,
255 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_tcp[] = {
256 	RTE_FLOW_ITEM_TYPE_ETH,
257 	RTE_FLOW_ITEM_TYPE_VLAN,
258 	RTE_FLOW_ITEM_TYPE_IPV6,
259 	RTE_FLOW_ITEM_TYPE_TCP,
260 	RTE_FLOW_ITEM_TYPE_END,
263 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_tcp[] = {
264 	RTE_FLOW_ITEM_TYPE_ETH,
265 	RTE_FLOW_ITEM_TYPE_VLAN,
266 	RTE_FLOW_ITEM_TYPE_VLAN,
267 	RTE_FLOW_ITEM_TYPE_IPV6,
268 	RTE_FLOW_ITEM_TYPE_TCP,
269 	RTE_FLOW_ITEM_TYPE_END,
272 enum rte_flow_item_type iavf_pattern_eth_ipv6_sctp[] = {
273 	RTE_FLOW_ITEM_TYPE_ETH,
274 	RTE_FLOW_ITEM_TYPE_IPV6,
275 	RTE_FLOW_ITEM_TYPE_SCTP,
276 	RTE_FLOW_ITEM_TYPE_END,
279 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_sctp[] = {
280 	RTE_FLOW_ITEM_TYPE_ETH,
281 	RTE_FLOW_ITEM_TYPE_VLAN,
282 	RTE_FLOW_ITEM_TYPE_IPV6,
283 	RTE_FLOW_ITEM_TYPE_SCTP,
284 	RTE_FLOW_ITEM_TYPE_END,
287 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_sctp[] = {
288 	RTE_FLOW_ITEM_TYPE_ETH,
289 	RTE_FLOW_ITEM_TYPE_VLAN,
290 	RTE_FLOW_ITEM_TYPE_VLAN,
291 	RTE_FLOW_ITEM_TYPE_IPV6,
292 	RTE_FLOW_ITEM_TYPE_SCTP,
293 	RTE_FLOW_ITEM_TYPE_END,
296 enum rte_flow_item_type iavf_pattern_eth_ipv6_icmp6[] = {
297 	RTE_FLOW_ITEM_TYPE_ETH,
298 	RTE_FLOW_ITEM_TYPE_IPV6,
299 	RTE_FLOW_ITEM_TYPE_ICMP6,
300 	RTE_FLOW_ITEM_TYPE_END,
303 enum rte_flow_item_type iavf_pattern_eth_vlan_ipv6_icmp6[] = {
304 	RTE_FLOW_ITEM_TYPE_ETH,
305 	RTE_FLOW_ITEM_TYPE_VLAN,
306 	RTE_FLOW_ITEM_TYPE_IPV6,
307 	RTE_FLOW_ITEM_TYPE_ICMP6,
308 	RTE_FLOW_ITEM_TYPE_END,
311 enum rte_flow_item_type iavf_pattern_eth_qinq_ipv6_icmp6[] = {
312 	RTE_FLOW_ITEM_TYPE_ETH,
313 	RTE_FLOW_ITEM_TYPE_VLAN,
314 	RTE_FLOW_ITEM_TYPE_VLAN,
315 	RTE_FLOW_ITEM_TYPE_IPV6,
316 	RTE_FLOW_ITEM_TYPE_ICMP6,
317 	RTE_FLOW_ITEM_TYPE_END,
/* GTP-U tunnel patterns: outer IPv4/UDP carrying GTPU, optionally with the
 * GTP_PSC extension header ("eh" variants), then an inner IPv4 (+ optional
 * inner UDP/TCP/ICMP).
 */
321 enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_ipv4[] = {
322 	RTE_FLOW_ITEM_TYPE_ETH,
323 	RTE_FLOW_ITEM_TYPE_IPV4,
324 	RTE_FLOW_ITEM_TYPE_UDP,
325 	RTE_FLOW_ITEM_TYPE_GTPU,
326 	RTE_FLOW_ITEM_TYPE_IPV4,
327 	RTE_FLOW_ITEM_TYPE_END,
330 enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4[] = {
331 	RTE_FLOW_ITEM_TYPE_ETH,
332 	RTE_FLOW_ITEM_TYPE_IPV4,
333 	RTE_FLOW_ITEM_TYPE_UDP,
334 	RTE_FLOW_ITEM_TYPE_GTPU,
335 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
336 	RTE_FLOW_ITEM_TYPE_IPV4,
337 	RTE_FLOW_ITEM_TYPE_END,
340 enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_udp[] = {
341 	RTE_FLOW_ITEM_TYPE_ETH,
342 	RTE_FLOW_ITEM_TYPE_IPV4,
343 	RTE_FLOW_ITEM_TYPE_UDP,
344 	RTE_FLOW_ITEM_TYPE_GTPU,
345 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
346 	RTE_FLOW_ITEM_TYPE_IPV4,
347 	RTE_FLOW_ITEM_TYPE_UDP,
348 	RTE_FLOW_ITEM_TYPE_END,
351 enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_tcp[] = {
352 	RTE_FLOW_ITEM_TYPE_ETH,
353 	RTE_FLOW_ITEM_TYPE_IPV4,
354 	RTE_FLOW_ITEM_TYPE_UDP,
355 	RTE_FLOW_ITEM_TYPE_GTPU,
356 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
357 	RTE_FLOW_ITEM_TYPE_IPV4,
358 	RTE_FLOW_ITEM_TYPE_TCP,
359 	RTE_FLOW_ITEM_TYPE_END,
363 enum rte_flow_item_type iavf_pattern_eth_ipv4_gtpu_eh_ipv4_icmp[] = {
364 	RTE_FLOW_ITEM_TYPE_ETH,
365 	RTE_FLOW_ITEM_TYPE_IPV4,
366 	RTE_FLOW_ITEM_TYPE_UDP,
367 	RTE_FLOW_ITEM_TYPE_GTPU,
368 	RTE_FLOW_ITEM_TYPE_GTP_PSC,
369 	RTE_FLOW_ITEM_TYPE_IPV4,
370 	RTE_FLOW_ITEM_TYPE_ICMP,
371 	RTE_FLOW_ITEM_TYPE_END,
/* Signature shared by iavf_parse_engine_create() and
 * iavf_parse_engine_validate(); lets iavf_flow_process_filter() run either
 * the create path or the validate-only path over a parser list.
 * Returns the engine that accepted the pattern/actions, or NULL.
 */
374 typedef struct iavf_flow_engine * (*parse_engine_t)(struct iavf_adapter *ad,
375 		struct rte_flow *flow,
376 		struct iavf_parser_list *parser_list,
377 		const struct rte_flow_item pattern[],
378 		const struct rte_flow_action actions[],
379 		struct rte_flow_error *error);
/* Append a flow engine to the global engine_list; iavf_flow_init() will
 * later call each registered engine's init() per adapter.
 */
382 iavf_register_flow_engine(struct iavf_flow_engine *engine)
384 	TAILQ_INSERT_TAIL(&engine_list, engine, node);
/* Per-adapter flow-subsystem init: set up the VF's flow and parser lists
 * plus the ops spinlock, then initialize every registered engine.
 * An engine without an init callback is an error; an init() returning
 * -ENOTSUP is tolerated (engine simply unsupported on this VF), any other
 * non-zero return is logged as a failure.
 */
388 iavf_flow_init(struct iavf_adapter *ad)
391 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
393 	struct iavf_flow_engine *engine;
395 	TAILQ_INIT(&vf->flow_list);
396 	TAILQ_INIT(&vf->rss_parser_list);
397 	TAILQ_INIT(&vf->dist_parser_list);
398 	rte_spinlock_init(&vf->flow_ops_lock);
400 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
401 		if (engine->init == NULL) {
402 			PMD_INIT_LOG(ERR, "Invalid engine type (%d)",
407 		ret = engine->init(ad);
408 		if (ret && ret != -ENOTSUP) {
409 			PMD_INIT_LOG(ERR, "Failed to initialize engine %d",
/* Per-adapter teardown: walk registered engines (presumably to call their
 * uninit hooks -- body elided here), then release every remaining flow via
 * its engine's free() and drain both parser lists.
 */
418 iavf_flow_uninit(struct iavf_adapter *ad)
420 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
421 	struct iavf_flow_engine *engine;
422 	struct rte_flow *p_flow;
423 	struct iavf_flow_parser_node *p_parser;
426 	TAILQ_FOREACH_SAFE(engine, &engine_list, node, temp) {
431 	/* Remove all flows */
432 	while ((p_flow = TAILQ_FIRST(&vf->flow_list))) {
433 		TAILQ_REMOVE(&vf->flow_list, p_flow, node);
		/* free() is optional per engine; only call it when provided */
434 		if (p_flow->engine->free)
435 			p_flow->engine->free(p_flow);
439 	/* Cleanup parser list */
440 	while ((p_parser = TAILQ_FIRST(&vf->rss_parser_list))) {
441 		TAILQ_REMOVE(&vf->rss_parser_list, p_parser, node);
445 	while ((p_parser = TAILQ_FIRST(&vf->dist_parser_list))) {
446 		TAILQ_REMOVE(&vf->dist_parser_list, p_parser, node);
/* Attach a parser to the VF list matching its engine type:
 * HASH (RSS) parsers go to the tail of rss_parser_list, FDIR parsers to
 * the head of dist_parser_list (head insertion gives later registrations
 * priority during lookup).
 */
452 iavf_register_parser(struct iavf_flow_parser *parser,
453 		struct iavf_adapter *ad)
455 	struct iavf_parser_list *list = NULL;
456 	struct iavf_flow_parser_node *parser_node;
457 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
459 	parser_node = rte_zmalloc("iavf_parser", sizeof(*parser_node), 0);
460 	if (parser_node == NULL) {
461 		PMD_DRV_LOG(ERR, "Failed to allocate memory.");
464 	parser_node->parser = parser;
466 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH) {
467 		list = &vf->rss_parser_list;
468 		TAILQ_INSERT_TAIL(list, parser_node, node);
469 	} else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR) {
470 		list = &vf->dist_parser_list;
471 		TAILQ_INSERT_HEAD(list, parser_node, node);
/* Inverse of iavf_register_parser(): pick the list for the parser's engine
 * type and remove the matching node. Matching is by engine type, so this
 * assumes at most one parser per engine type per list -- TODO confirm.
 */
480 iavf_unregister_parser(struct iavf_flow_parser *parser,
481 		struct iavf_adapter *ad)
483 	struct iavf_parser_list *list = NULL;
484 	struct iavf_flow_parser_node *p_parser;
485 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
488 	if (parser->engine->type == IAVF_FLOW_ENGINE_HASH)
489 		list = &vf->rss_parser_list;
490 	else if (parser->engine->type == IAVF_FLOW_ENGINE_FDIR)
491 		list = &vf->dist_parser_list;
496 	TAILQ_FOREACH_SAFE(p_parser, list, node, temp) {
497 		if (p_parser->parser->engine->type == parser->engine->type) {
498 			TAILQ_REMOVE(list, p_parser, node);
/* Validate rte_flow attributes for this driver: only ingress is accepted;
 * egress, non-zero priority and (per the message below) non-zero group are
 * rejected with EINVAL via rte_flow_error_set().
 */
505 iavf_flow_valid_attr(const struct rte_flow_attr *attr,
506 		struct rte_flow_error *error)
508 	/* Must be input direction */
509 	if (!attr->ingress) {
510 		rte_flow_error_set(error, EINVAL,
511 				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
512 				attr, "Only support ingress.");
	/* Egress not supported (condition line elided in this view). */
518 		rte_flow_error_set(error, EINVAL,
519 				RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
520 				attr, "Not support egress.");
525 	if (attr->priority) {
526 		rte_flow_error_set(error, EINVAL,
527 				RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
528 				attr, "Not support priority.");
	/* Group check (condition line elided in this view). */
534 		rte_flow_error_set(error, EINVAL,
535 				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
536 				attr, "Not support group.");
543 /* Find the first VOID or non-VOID item pointer */
/* Scans forward from 'item'; is_void selects whether a VOID or a non-VOID
 * item counts as a match. Stops at RTE_FLOW_ITEM_TYPE_END, so the END item
 * itself can be the result when nothing matches earlier.
 */
544 static const struct rte_flow_item *
545 iavf_find_first_item(const struct rte_flow_item *item, bool is_void)
549 	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
551 			is_find = item->type == RTE_FLOW_ITEM_TYPE_VOID;
553 			is_find = item->type != RTE_FLOW_ITEM_TYPE_VOID;
561 /* Skip all VOID items of the pattern */
/* Compacts 'pattern' into 'items' by copying only non-VOID runs.
 * Caller must size 'items' for the non-void item count plus the END item
 * (see iavf_search_pattern_match_item()). Works in [pb, pe) chunks:
 * pb = first non-void item, pe = next void item (or END).
 */
563 iavf_pattern_skip_void_item(struct rte_flow_item *items,
564 		const struct rte_flow_item *pattern)
566 	uint32_t cpy_count = 0;
567 	const struct rte_flow_item *pb = pattern, *pe = pattern;
570 		/* Find a non-void item first */
571 		pb = iavf_find_first_item(pb, false);
572 		if (pb->type == RTE_FLOW_ITEM_TYPE_END) {
577 		/* Find a void item */
578 		pe = iavf_find_first_item(pb + 1, true);
581 		rte_memcpy(items, pb, sizeof(struct rte_flow_item) * cpy_count);
585 		if (pe->type == RTE_FLOW_ITEM_TYPE_END)
590 	/* Copy the END item. */
591 	rte_memcpy(items, pe, sizeof(struct rte_flow_item));
594 /* Check if the pattern matches a supported item type array */
/* Walks both sequences in lockstep; a match requires both to reach
 * RTE_FLOW_ITEM_TYPE_END together (same types, same length).
 * 'pattern' is expected to be VOID-free (already compacted).
 */
596 iavf_match_pattern(enum rte_flow_item_type *item_array,
597 		const struct rte_flow_item *pattern)
599 	const struct rte_flow_item *item = pattern;
601 	while ((*item_array == item->type) &&
602 	       (*item_array != RTE_FLOW_ITEM_TYPE_END)) {
607 	return (*item_array == RTE_FLOW_ITEM_TYPE_END &&
608 		item->type == RTE_FLOW_ITEM_TYPE_END);
/* Look up 'pattern' in a parser's supported-pattern table.
 * Builds a VOID-stripped copy of the pattern, compares it against each
 * array entry with iavf_match_pattern(), and on a hit returns a freshly
 * allocated iavf_pattern_match_item populated from that entry
 * (input_set_mask, pattern_list, meta). The returned item is owned by the
 * caller ("need free by each filter"). On no match or allocation failure,
 * sets 'error' and (on the error paths) frees the temporaries.
 */
611 struct iavf_pattern_match_item *
612 iavf_search_pattern_match_item(const struct rte_flow_item pattern[],
613 		struct iavf_pattern_match_item *array,
615 		struct rte_flow_error *error)
618 	struct iavf_pattern_match_item *pattern_match_item;
619 	/* need free by each filter */
620 	struct rte_flow_item *items; /* used for pattern without VOID items */
621 	uint32_t item_num = 0; /* non-void item number */
623 	/* Get the non-void item number of pattern */
624 	while ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_END) {
625 		if ((pattern + i)->type != RTE_FLOW_ITEM_TYPE_VOID)
	/* item_num presumably incremented past END here (lines elided) so the
	 * allocation below also covers the END item -- TODO confirm. */
631 	items = rte_zmalloc("iavf_pattern",
632 			    item_num * sizeof(struct rte_flow_item), 0);
634 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
635 				   NULL, "No memory for PMD internal items.");
638 	pattern_match_item = rte_zmalloc("iavf_pattern_match_item",
639 					sizeof(struct iavf_pattern_match_item), 0);
640 	if (!pattern_match_item) {
641 		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
642 				   NULL, "Failed to allocate memory.");
	/* Strip VOID items, then scan the table for an exact match. */
646 	iavf_pattern_skip_void_item(items, pattern);
648 	for (i = 0; i < array_len; i++)
649 		if (iavf_match_pattern(array[i].pattern_list,
651 			pattern_match_item->input_set_mask =
652 				array[i].input_set_mask;
653 			pattern_match_item->pattern_list =
654 				array[i].pattern_list;
655 			pattern_match_item->meta = array[i].meta;
657 			return pattern_match_item;
659 	rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
660 			   pattern, "Unsupported pattern");
663 	rte_free(pattern_match_item);
/* Create-path engine selection (one of the two parse_engine_t impls).
 * Tries each parser in 'parser_list': the first whose
 * parse_pattern_action() accepts the pattern/actions nominates its engine;
 * that engine's create() is then invoked with the parser-produced 'meta'
 * to program the hardware rule into 'flow'. Returns the engine used, or
 * NULL if no parser/engine succeeded.
 */
667 static struct iavf_flow_engine *
668 iavf_parse_engine_create(struct iavf_adapter *ad,
669 		struct rte_flow *flow,
670 		struct iavf_parser_list *parser_list,
671 		const struct rte_flow_item pattern[],
672 		const struct rte_flow_action actions[],
673 		struct rte_flow_error *error)
675 	struct iavf_flow_engine *engine = NULL;
676 	struct iavf_flow_parser_node *parser_node;
680 	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
		/* Negative return = this parser does not handle the rule. */
681 		if (parser_node->parser->parse_pattern_action(ad,
682 				parser_node->parser->array,
683 				parser_node->parser->array_len,
684 				pattern, actions, &meta, error) < 0)
687 		engine = parser_node->parser->engine;
689 		RTE_ASSERT(engine->create != NULL);
690 		if (!(engine->create(ad, flow, meta, error)))
/* Validate-path engine selection (sibling of iavf_parse_engine_create).
 * Same parser walk, but calls the engine's validation() callback instead
 * of create(), so no hardware state is programmed. Sets 'error' when the
 * selected engine lacks a validation callback or validation fails.
 */
696 static struct iavf_flow_engine *
697 iavf_parse_engine_validate(struct iavf_adapter *ad,
698 		struct rte_flow *flow,
699 		struct iavf_parser_list *parser_list,
700 		const struct rte_flow_item pattern[],
701 		const struct rte_flow_action actions[],
702 		struct rte_flow_error *error)
704 	struct iavf_flow_engine *engine = NULL;
705 	struct iavf_flow_parser_node *parser_node;
709 	TAILQ_FOREACH_SAFE(parser_node, parser_list, node, temp) {
710 		if (parser_node->parser->parse_pattern_action(ad,
711 				parser_node->parser->array,
712 				parser_node->parser->array_len,
713 				pattern, actions, &meta, error) < 0)
716 		engine = parser_node->parser->engine;
717 		if (engine->validation == NULL) {
718 			rte_flow_error_set(error, EINVAL,
719 					   RTE_FLOW_ERROR_TYPE_HANDLE,
720 					   NULL, "Validation not support");
724 		if (engine->validation(ad, flow, meta, error)) {
725 			rte_flow_error_set(error, EINVAL,
726 					   RTE_FLOW_ERROR_TYPE_HANDLE,
727 					   NULL, "Validation failed");
/* Common front-end for validate and create: checks pattern/actions/attr
 * for NULL, validates attributes, then runs the supplied parse-engine
 * callback first against the RSS parser list and, if that yields no
 * engine, against the FDIR (dist) parser list. *engine receives the
 * engine that accepted the rule.
 */
736 iavf_flow_process_filter(struct rte_eth_dev *dev,
737 		struct rte_flow *flow,
738 		const struct rte_flow_attr *attr,
739 		const struct rte_flow_item pattern[],
740 		const struct rte_flow_action actions[],
741 		struct iavf_flow_engine **engine,
742 		parse_engine_t iavf_parse_engine,
743 		struct rte_flow_error *error)
745 	int ret = IAVF_ERR_CONFIG;
746 	struct iavf_adapter *ad =
747 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
748 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	/* NULL-pattern guard (condition line elided in this view). */
751 		rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
752 				   NULL, "NULL pattern.");
757 		rte_flow_error_set(error, EINVAL,
758 				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
759 				   NULL, "NULL action.");
764 		rte_flow_error_set(error, EINVAL,
765 				   RTE_FLOW_ERROR_TYPE_ATTR,
766 				   NULL, "NULL attribute.");
770 	ret = iavf_flow_valid_attr(attr, error);
	/* RSS parsers first, FDIR parsers as fallback. */
774 	*engine = iavf_parse_engine(ad, flow, &vf->rss_parser_list, pattern,
779 	*engine = iavf_parse_engine(ad, flow, &vf->dist_parser_list, pattern,
/* rte_flow_ops.validate: dry-run a rule through the shared filter
 * pipeline with the validate-only parse callback; flow is NULL because
 * nothing is created.
 */
789 iavf_flow_validate(struct rte_eth_dev *dev,
790 		const struct rte_flow_attr *attr,
791 		const struct rte_flow_item pattern[],
792 		const struct rte_flow_action actions[],
793 		struct rte_flow_error *error)
795 	struct iavf_flow_engine *engine;
797 	return iavf_flow_process_filter(dev, NULL, attr, pattern, actions,
798 			&engine, iavf_parse_engine_validate, error);
/* rte_flow_ops.create: allocate an rte_flow handle, run the shared filter
 * pipeline with the create parse callback (which programs the rule), then
 * record the owning engine and add the flow to the VF flow_list.
 * flow_ops_lock serializes this against the other flow ops (the lock
 * acquisition line is elided in this view; the unlock is visible below).
 * Returns the flow handle, or NULL with 'error' set.
 */
801 static struct rte_flow *
802 iavf_flow_create(struct rte_eth_dev *dev,
803 		const struct rte_flow_attr *attr,
804 		const struct rte_flow_item pattern[],
805 		const struct rte_flow_action actions[],
806 		struct rte_flow_error *error)
808 	struct iavf_adapter *ad =
809 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
810 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
811 	struct iavf_flow_engine *engine = NULL;
812 	struct rte_flow *flow = NULL;
815 	flow = rte_zmalloc("iavf_flow", sizeof(struct rte_flow), 0);
817 		rte_flow_error_set(error, ENOMEM,
818 				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
819 				   "Failed to allocate memory");
823 	ret = iavf_flow_process_filter(dev, flow, attr, pattern, actions,
824 			&engine, iavf_parse_engine_create, error);
826 		PMD_DRV_LOG(ERR, "Failed to create flow");
832 	flow->engine = engine;
833 	TAILQ_INSERT_TAIL(&vf->flow_list, flow, node);
834 	PMD_DRV_LOG(INFO, "Succeeded to create (%d) flow", engine->type);
837 	rte_spinlock_unlock(&vf->flow_ops_lock);
/* rte_flow_ops.destroy: validate the handle, then under flow_ops_lock ask
 * the owning engine to tear down the rule; on success the flow is removed
 * from the VF flow_list (and presumably freed -- line elided here).
 */
842 iavf_flow_destroy(struct rte_eth_dev *dev,
843 		struct rte_flow *flow,
844 		struct rte_flow_error *error)
846 	struct iavf_adapter *ad =
847 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
848 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
	/* Reject handles with no engine or no destroy callback. */
851 	if (!flow || !flow->engine || !flow->engine->destroy) {
852 		rte_flow_error_set(error, EINVAL,
853 				   RTE_FLOW_ERROR_TYPE_HANDLE,
854 				   NULL, "Invalid flow");
858 	rte_spinlock_lock(&vf->flow_ops_lock);
860 	ret = flow->engine->destroy(ad, flow, error);
863 		TAILQ_REMOVE(&vf->flow_list, flow, node);
866 		PMD_DRV_LOG(ERR, "Failed to destroy flow");
869 	rte_spinlock_unlock(&vf->flow_ops_lock);
/* rte_flow_ops.flush: destroy every flow on the VF flow_list by calling
 * iavf_flow_destroy() on each (which also unlinks it from the list);
 * the SAFE iterator tolerates that removal. Stops logging on first error.
 */
875 iavf_flow_flush(struct rte_eth_dev *dev,
876 		struct rte_flow_error *error)
878 	struct iavf_adapter *ad =
879 		IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
880 	struct iavf_info *vf = IAVF_DEV_PRIVATE_TO_VF(ad);
881 	struct rte_flow *p_flow;
885 	TAILQ_FOREACH_SAFE(p_flow, &vf->flow_list, node, temp) {
886 		ret = iavf_flow_destroy(dev, p_flow, error);
888 			PMD_DRV_LOG(ERR, "Failed to flush flows");
897 iavf_flow_query(struct rte_eth_dev *dev,
898 struct rte_flow *flow,
899 const struct rte_flow_action *actions,
901 struct rte_flow_error *error)
904 struct iavf_adapter *ad =
905 IAVF_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private);
906 struct rte_flow_query_count *count = data;
908 if (!flow || !flow->engine || !flow->engine->query_count) {
909 rte_flow_error_set(error, EINVAL,
910 RTE_FLOW_ERROR_TYPE_HANDLE,
911 NULL, "Invalid flow");
915 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
916 switch (actions->type) {
917 case RTE_FLOW_ACTION_TYPE_VOID:
919 case RTE_FLOW_ACTION_TYPE_COUNT:
920 ret = flow->engine->query_count(ad, flow, count, error);
923 return rte_flow_error_set(error, ENOTSUP,
924 RTE_FLOW_ERROR_TYPE_ACTION,
926 "action not supported");