/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
		     const struct rte_flow_tunnel *app_tunnel,
		     struct mlx5_flow_tunnel **tunnel);

/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}
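
/*
 * For illustration, a node initialized with
 * MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6)
 * receives a zero-terminated compound literal as its next-node list:
 *
 *	.next = (const int []){
 *		MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0,
 *	},
 *
 * which is why index 0 acts as the list terminator below.
 */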

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. Index 0 is interpreted as a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see ETH_RSS_* definitions).
	 */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of valid entries in @p entry. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
						(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
						(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					(item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
					(item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}
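
/*
 * Example: an ETH item whose spec.type is RTE_BE16(RTE_ETHER_TYPE_IPV4) under
 * a full 0xffff type mask completes to RTE_FLOW_ITEM_TYPE_IPV4, so the RSS
 * expansion below can append an IPv4 item after the user pattern. A
 * masked-out or absent spec completes to VOID (nothing to deduce), and an
 * unknown EtherType completes to END (no expansion possible).
 */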

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set, the following errors are defined:
 *
 *   -E2BIG: graph-depth @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const int elt_n = 8;
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[elt_n];
	int stack_pos = 0;
	struct rte_flow_item flow_items[elt_n];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		elt_n * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[elt_n];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * Check if the last valid item has spec set, need complete pattern,
	 * and the pattern can be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy it.
			 * When the stack_pos is 0, there is one element in it,
			 * plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == elt_n) {
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows but we have a missed item; create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	return lsize;
}
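
/*
 * Usage sketch (illustrative only, mirroring how the flow creation path
 * invokes the expansion): expanding ETH / IPV4 / END against
 * mlx5_support_expansion with types = ETH_RSS_NONFRAG_IPV4_UDP yields two
 * entries - the original pattern at priority 0 and ETH / IPV4 / UDP / END at
 * priority 1:
 *
 *	union {
 *		struct mlx5_flow_expand_rss buf;
 *		uint8_t buffer[2048];
 *	} expand_buffer;
 *	int ret = mlx5_flow_expand_rss(&expand_buffer.buf,
 *				       sizeof(expand_buffer.buffer),
 *				       items, ETH_RSS_NONFRAG_IPV4_UDP,
 *				       mlx5_support_expansion,
 *				       MLX5_EXPANSION_ROOT);
 *	// On success, expand_buffer.buf.entries patterns are available in
 *	// expand_buffer.buf.entry[i].pattern with matching .priority.
 */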

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
						(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
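
/*
 * For example, starting from MLX5_EXPANSION_ROOT the walk
 * ROOT -> ETH -> IPV4 -> IPV4_UDP describes the expanded pattern
 * ETH / IPV4 / UDP, which is emitted only when the requested RSS types
 * include ETH_RSS_NONFRAG_IPV4_UDP (the rss_types of the final node).
 */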

static struct rte_flow_shared_action *
mlx5_shared_action_create(struct rte_eth_dev *dev,
			  const struct rte_flow_shared_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_shared_action_destroy
		(struct rte_eth_dev *dev,
		 struct rte_flow_shared_action *shared_action,
		 struct rte_flow_error *error);
static int mlx5_shared_action_update
		(struct rte_eth_dev *dev,
		 struct rte_flow_shared_action *shared_action,
		 const struct rte_flow_action *action,
		 struct rte_flow_error *error);
static int mlx5_shared_action_query
		(struct rte_eth_dev *dev,
		 const struct rte_flow_shared_action *action,
		 void *data,
		 struct rte_flow_error *error);

static bool
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
			  struct rte_flow_tunnel *tunnel,
			  const char **err_msg)
{
	/* Pass the message back by reference so callers actually see it. */
	*err_msg = NULL;
	if (!is_tunnel_offload_active(dev)) {
		*err_msg = "tunnel offload was not activated";
		goto out;
	} else if (!tunnel) {
		*err_msg = "no application tunnel";
		goto out;
	}

	switch (tunnel->type) {
	default:
		*err_msg = "unsupported tunnel type";
		goto out;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		break;
	}

out:
	return !*err_msg;
}

static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error)
{
	int ret;
	struct mlx5_flow_tunnel *tunnel;
	const char *err_msg = NULL;
	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

	if (!verdict)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  err_msg);
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0)
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "failed to initialize pmd tunnel");
	*actions = &tunnel->action;
	*num_of_actions = 1;
	return 0;
}

static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error)
{
	int ret;
	struct mlx5_flow_tunnel *tunnel;
	const char *err_msg = NULL;
	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

	if (!verdict)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  err_msg);
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0)
		return rte_flow_error_set(error, ret,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "failed to initialize pmd tunnel");
	*items = &tunnel->item;
	*num_of_items = 1;
	return 0;
}

static int
mlx5_flow_item_release(struct rte_eth_dev *dev,
		       struct rte_flow_item *pmd_items,
		       uint32_t num_items, struct rte_flow_error *err)
{
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tun;

	LIST_FOREACH(tun, &thub->tunnels, chain) {
		if (&tun->item == pmd_items)
			break;
	}
	if (!tun || num_items != 1)
		return rte_flow_error_set(err, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid argument");
	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tun);
	return 0;
}

static int
mlx5_flow_action_release(struct rte_eth_dev *dev,
			 struct rte_flow_action *pmd_actions,
			 uint32_t num_actions, struct rte_flow_error *err)
{
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct mlx5_flow_tunnel *tun;

	LIST_FOREACH(tun, &thub->tunnels, chain) {
		if (&tun->action == pmd_actions)
			break;
	}
	if (!tun || num_actions != 1)
		return rte_flow_error_set(err, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "invalid argument");
	if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tun);
	return 0;
}

static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.shared_action_create = mlx5_shared_action_create,
	.shared_action_destroy = mlx5_shared_action_destroy,
	.shared_action_update = mlx5_shared_action_update,
	.shared_action_query = mlx5_shared_action_query,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_action_release,
	.tunnel_item_release = mlx5_flow_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};
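
/*
 * These callbacks back the generic rte_flow API. As an illustrative sketch
 * (application side, names from rte_flow.h; port_id and values are
 * hypothetical), the tunnel offload entry points registered above are
 * reached like:
 *
 *	struct rte_flow_tunnel tun = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.tun_id = 42,	// example tunnel identifier
 *	};
 *	struct rte_flow_action *pmd_actions;
 *	uint32_t num_of_actions;
 *	struct rte_flow_error error;
 *
 *	// Resolves to mlx5_flow_tunnel_decap_set() on an mlx5 port.
 *	rte_flow_tunnel_decap_set(port_id, &tun, &pmd_actions,
 *				  &num_of_actions, &error);
 */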

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
	struct rte_flow_attr attr;
	struct rte_flow_item items[4];
	struct rte_flow_item_eth l2;
	struct rte_flow_item_eth l2_mask;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3;
	union {
		struct rte_flow_item_ipv4 ipv4;
		struct rte_flow_item_ipv6 ipv6;
	} l3_mask;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4;
	union {
		struct rte_flow_item_udp udp;
		struct rte_flow_item_tcp tcp;
	} l4_mask;
	struct rte_flow_action actions[2];
	struct rte_flow_action_queue queue;
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};

/* Key of thread specific flow workspace data. */
static pthread_key_t key_workspace;

/* One-time initialization control for the flow workspace key. */
static pthread_once_t key_workspace_init;

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
enum modify_reg
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used only in the meter
		 * suffix sub-flow when a meter is present, so it is safe to
		 * share the same register.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If a meter is enabled, it engages a register for the color
		 * match and flow match. If the meter color match is not using
		 * REG_C_2, the REG_C_x used by the meter color match must be
		 * skipped.
		 * If no meter is enabled, all available registers are free to
		 * use.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than 2.
		 * Take care not to conflict with the meter color REG_C_x.
		 * If the available index REG_C_y >= REG_C_x, skip the
		 * color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (id >= (REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
							NULL, "invalid tag id");
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
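
/*
 * For instance, with dv_xmeta_en == MLX5_XMETA_MODE_META16 the 16-bit
 * metadata travels in REG_C_0 and the flow mark in REG_C_1, while the legacy
 * mode keeps Rx metadata in REG_B and has no register for the mark;
 * mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &error) makes that choice in
 * one place for every caller.
 */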

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata register, which could mean,
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
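
/*
 * The subset check above works bytewise: a user mask is acceptable only if
 * it enables no bit outside the NIC mask, i.e. (nic_mask[i] | mask[i]) ==
 * nic_mask[i] for every byte. For example, with nic_mask byte 0xf0, a user
 * mask byte 0x30 passes (0xf0 | 0x30 == 0xf0) while 0x0f fails because the
 * low nibble is not supported by the NIC.
 */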

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
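
/*
 * Example: for an inner-header hash on a tunnel item with
 * rss_desc->level == 2, the IBV_RX_HASH_INNER bit is OR-ed into the returned
 * fields; with level <= 1 the same call returns 0 for tunnel items so that
 * only the outer headers feed the hash.
 */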

/**
 * Lookup and set the ptype in the data Rx part. A single Ptype can be used;
 * if several tunnel rules are used on this queue, the tunnel ptype will be
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
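
/*
 * Example: a queue referenced only by VXLAN rules reports
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP in rxq.tunnel, but as soon as a
 * GRE rule is added to the same queue the ptype becomes ambiguous and is
 * cleared to 0.
 */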

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_hrxq *hrxq;
	unsigned int i;

	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
		return;
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
	if (!hrxq)
		return;
	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
		int idx = hrxq->ind_table->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must be always enabled (metadata may arrive
		 * from another port - not from local flows only).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_hrxq *hrxq;
	unsigned int i;

	if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
		return;
	hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
	if (!hrxq)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
		int idx = hrxq->ind_table->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *data;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		if (!(*priv->rxqs)[i])
			continue;
		data = (*priv->rxqs)[i];
		if (!rte_flow_dynf_metadata_avail()) {
			data->dynf_meta = 0;
			data->flow_meta_mask = 0;
			data->flow_meta_offset = -1;
		} else {
			data->dynf_meta = 1;
			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
		}
	}
}
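
/*
 * The mask and offset cached above come from the mbuf dynamic flow-metadata
 * field. As an illustrative sketch, an application enables that field before
 * device start with:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		return;	// dynamic field registration failed
 *
 * after which rte_flow_dynf_metadata_avail() starts returning nonzero here.
 */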

/**
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
			       uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	const struct rte_flow_action_mark *mark = action->conf;

	if (!mark)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  action,
					  "configuration cannot be null");
	if (mark->id >= MLX5_FLOW_MARK_MAX)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &mark->id,
					  "mark id must be in 0 <= id < "
					  RTE_STR(MLX5_FLOW_MARK_MAX));
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't flag and mark in same flow");
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 mark actions in same"
					  " flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "mark action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "drop action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				uint64_t action_flags,
				struct rte_eth_dev *dev,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_queue *queue = action->conf;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (queue->index >= priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue index out of range");
	if (!(*priv->rxqs)[queue->index])
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &queue->index,
					  "queue is not configured");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "queue action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the rss action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_validate_action_rss(struct rte_eth_dev *dev,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const struct rte_flow_action_rss *rss = action->conf;
	unsigned int i;

	if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
	    rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->func,
					  "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	if (rss->level > 2)
#else
	if (rss->level > 1)
#endif
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->level,
					  "tunnel RSS is not supported");
	/* allow RSS key_len 0 in case of NULL (default) RSS key. */
	if (rss->key_len == 0 && rss->key != NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key length 0");
	if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too small");
	if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->key_len,
					  "RSS hash key too large");
	if (rss->queue_num > priv->config.ind_table_max_size)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->queue_num,
					  "number of queues too large");
	if (rss->types & MLX5_RSS_HF_MASK)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  &rss->types,
					  "some RSS protocols are not"
					  " supported");
	if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
	    !(rss->types & ETH_RSS_IP))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L3 partial RSS requested but L3 RSS"
					  " type not specified");
	if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
	    !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "L4 partial RSS requested but L4 RSS"
					  " type not specified");
	if (!priv->rxqs_n)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No Rx queues configured");
	if (!rss->queue_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					  NULL, "No queues configured");
	for (i = 0; i != rss->queue_num; ++i) {
		if (rss->queue[i] >= priv->rxqs_n)
			return rte_flow_error_set
				(error, EINVAL,
				 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue index out of range");
		if (!(*priv->rxqs)[rss->queue[i]])
			return rte_flow_error_set
				(error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
				 &rss->queue[i], "queue is not configured");
	}
	return 0;
}
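
/*
 * For reference, a configuration that passes all checks above (illustrative
 * values, assuming four configured Rx queues):
 *
 *	static const uint16_t queues[] = { 0, 1, 2, 3 };
 *	const struct rte_flow_action_rss rss_conf = {
 *		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
 *		.level = 1,			// outermost encapsulation
 *		.types = ETH_RSS_IP | ETH_RSS_UDP,
 *		.key_len = 0,			// use the default key
 *		.key = NULL,
 *		.queue_num = RTE_DIM(queues),
 *		.queue = queues,
 *	};
 */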

/**
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
			      uint64_t action_flags,
			      struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_action_rss *rss = action->conf;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;

	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions"
					  " in same flow");
	ret = mlx5_validate_action_rss(dev, action, error);
	if (ret)
		return ret;
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "rss action not supported for "
					  "egress");
	if (rss->level > 1 && !tunnel)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "inner RSS is not supported for "
					  "non-tunnel flows");
	if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
	    !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "RSS on eCPRI is not supported now");
	}
	return 0;
}

/**
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-fields that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_default_miss(uint64_t action_flags,
				       const struct rte_flow_attr *attr,
				       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_FATE_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 fate actions in"
					  " same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "default miss action not supported "
					  "for egress");
	if (attr->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
					  "only group 0 is supported");
	if (attr->transfer)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	return 0;
}

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
				const struct rte_flow_attr *attr,
				struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "count action not supported for "
					  "egress");
	return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
			      const struct rte_flow_attr *attributes,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t priority_max = priv->config.flow_prio - 1;

	if (attributes->group)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  NULL, "groups are not supported");
	if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
	    attributes->priority >= priority_max)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
					  NULL, "priority out of range");
	if (attributes->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "egress is not supported");
	if (attributes->transfer && !priv->config.dv_esw_en)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
					  NULL, "transfer is not supported");
	if (!attributes->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "ingress attribute is mandatory");
	return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous layer.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
			      uint64_t item_flags,
			      uint8_t target_protocol,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp6 *mask = item->mask;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP6 layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv6 is mandatory to filter on"
					  " ICMP6");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &rte_flow_item_icmp6_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&rte_flow_item_icmp6_mask,
		 sizeof(struct rte_flow_item_icmp6),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous layer.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     uint8_t target_protocol,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp *mask = item->mask;
	const struct rte_flow_item_icmp nic_mask = {
		.hdr.icmp_type = 0xff,
		.hdr.icmp_code = 0xff,
		.hdr.icmp_ident = RTE_BE16(0xffff),
		.hdr.icmp_seq_nb = RTE_BE16(0xffff),
	};
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
				      MLX5_FLOW_LAYER_OUTER_L3_IPV4;
	const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
				      MLX5_FLOW_LAYER_OUTER_L4;
	int ret;

	if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "protocol filtering not compatible"
					  " with ICMP layer");
	if (!(item_flags & l3m))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "IPv4 is mandatory to filter"
					  " on ICMP");
	if (item_flags & l4m)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L4 layers not supported");
	if (!mask)
		mask = &nic_mask;
	ret = mlx5_flow_item_acceptable
		(item, (const uint8_t *)mask,
		 (const uint8_t *)&nic_mask,
		 sizeof(struct rte_flow_item_icmp),
		 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret < 0)
		return ret;
	return 0;
}

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
			    uint64_t item_flags, bool ext_vlan_sup,
			    struct rte_flow_error *error)
{
	const struct rte_flow_item_eth *mask = item->mask;
	const struct rte_flow_item_eth nic_mask = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
		.type = RTE_BE16(0xffff),
		.has_vlan = ext_vlan_sup ? 1 : 0,
	};
	int ret;
	int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
				       MLX5_FLOW_LAYER_OUTER_L2;

	if (item_flags & ethm)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple L2 layers not supported");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow "
					  "L3 layers");
	if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
	    (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "L2 layer should not follow VLAN");
	if (!mask)
		mask = &rte_flow_item_eth_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_eth),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	return ret;
}
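
/*
 * Example of an ETH item this validator accepts (illustrative values):
 * matching a unicast destination MAC with a fully-masked EtherType.
 *
 *	const struct rte_flow_item_eth spec = {
 *		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	const struct rte_flow_item_eth mask = {
 *		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *		.type = RTE_BE16(0xffff),
 *	};
 *	const struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 */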

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[in] dev
 *   Ethernet device flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
			     uint64_t item_flags,
			     struct rte_eth_dev *dev,
			     struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan *spec = item->spec;
	const struct rte_flow_item_vlan *mask = item->mask;
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(UINT16_MAX),
		.inner_type = RTE_BE16(UINT16_MAX),
	};
	uint16_t vlan_tag = 0;
	const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
	int ret;
	const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
					MLX5_FLOW_LAYER_INNER_L4) :
				       (MLX5_FLOW_LAYER_OUTER_L3 |
					MLX5_FLOW_LAYER_OUTER_L4);
	const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					MLX5_FLOW_LAYER_OUTER_VLAN;

	if (item_flags & vlanm)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple VLAN layers not supported");
	else if ((item_flags & l34m) != 0)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "VLAN cannot follow L3/L4 layer");
	if (!mask)
		mask = &rte_flow_item_vlan_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_vlan),
					MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
	if (ret)
		return ret;
	if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
		struct mlx5_priv *priv = dev->data->dev_private;

		if (priv->vmwa_context) {
			/*
			 * A non-NULL context means we have a virtual machine
			 * and SR-IOV enabled. We have to create a VLAN
			 * interface to make the hypervisor set up the
			 * E-Switch vport context correctly. We avoid creating
			 * multiple VLAN interfaces, so we cannot support a
			 * VLAN tag mask.
			 */
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "VLAN tag mask is not"
						  " supported in virtual"
						  " environment");
		}
	}
	if (spec) {
		vlan_tag = spec->tci;
		vlan_tag &= mask->tci;
	}
	/*
	 * From the verbs perspective an empty VLAN is equivalent
	 * to a packet without a VLAN layer.
	 */
	if (!vlan_tag)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "VLAN cannot be empty");
	return 0;
}
2054 * Validate IPV4 item.
2057 * Item specification.
2058 * @param[in] item_flags
2059 * Bit-fields that holds the items detected until now.
2060 * @param[in] last_item
2061 * Previous validated item in the pattern items.
2062 * @param[in] ether_type
2063 * Type in the ethernet layer header (including dot1q).
2064 * @param[in] acc_mask
2065 * Acceptable mask, if NULL default internal default mask
2066 * will be used to check whether item fields are supported.
2067 * @param[in] range_accepted
2068 * True if range of values is accepted for specific fields, false otherwise.
2070 * Pointer to error structure.
2073 * 0 on success, a negative errno value otherwise and rte_errno is set.
2076 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2077 uint64_t item_flags,
2079 uint16_t ether_type,
2080 const struct rte_flow_item_ipv4 *acc_mask,
2081 bool range_accepted,
2082 struct rte_flow_error *error)
2084 const struct rte_flow_item_ipv4 *mask = item->mask;
2085 const struct rte_flow_item_ipv4 *spec = item->spec;
2086 const struct rte_flow_item_ipv4 nic_mask = {
2088 .src_addr = RTE_BE32(0xffffffff),
2089 .dst_addr = RTE_BE32(0xffffffff),
2090 .type_of_service = 0xff,
2091 .next_proto_id = 0xff,
2094 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2095 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2096 MLX5_FLOW_LAYER_OUTER_L3;
2097 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2098 MLX5_FLOW_LAYER_OUTER_L4;
2100 uint8_t next_proto = 0xFF;
2101 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2102 MLX5_FLOW_LAYER_OUTER_VLAN |
2103 MLX5_FLOW_LAYER_INNER_VLAN);
2105 if ((last_item & l2_vlan) && ether_type &&
2106 ether_type != RTE_ETHER_TYPE_IPV4)
2107 return rte_flow_error_set(error, EINVAL,
2108 RTE_FLOW_ERROR_TYPE_ITEM, item,
2109 "IPv4 cannot follow L2/VLAN layer "
2110 "which ether type is not IPv4");
2111 if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2113 next_proto = mask->hdr.next_proto_id &
2114 spec->hdr.next_proto_id;
2115 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2116 return rte_flow_error_set(error, EINVAL,
2117 RTE_FLOW_ERROR_TYPE_ITEM,
2122 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2123 return rte_flow_error_set(error, EINVAL,
2124 RTE_FLOW_ERROR_TYPE_ITEM, item,
2125 "wrong tunnel type - IPv6 specified "
2126 "but IPv4 item provided");
2127 if (item_flags & l3m)
2128 return rte_flow_error_set(error, ENOTSUP,
2129 RTE_FLOW_ERROR_TYPE_ITEM, item,
2130 "multiple L3 layers not supported");
2131 else if (item_flags & l4m)
2132 return rte_flow_error_set(error, EINVAL,
2133 RTE_FLOW_ERROR_TYPE_ITEM, item,
2134 "L3 cannot follow an L4 layer.");
2135 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2136 !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2137 return rte_flow_error_set(error, EINVAL,
2138 RTE_FLOW_ERROR_TYPE_ITEM, item,
2139 "L3 cannot follow an NVGRE layer.");
2141 mask = &rte_flow_item_ipv4_mask;
2142 else if (mask->hdr.next_proto_id != 0 &&
2143 mask->hdr.next_proto_id != 0xff)
2144 return rte_flow_error_set(error, EINVAL,
2145 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2146 "partial mask is not supported"
2148 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2149 acc_mask ? (const uint8_t *)acc_mask
2150 : (const uint8_t *)&nic_mask,
2151 sizeof(struct rte_flow_item_ipv4),
2152 range_accepted, error);
2153 if (ret < 0)
2154 return ret;
2155 return 0;
2156 }
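/*
 * Editor's illustrative sketch (not part of the original file): a pattern
 * item that passes the checks above. A partial protocol mask such as 0x0f
 * would be rejected with "partial mask is not supported for protocol".
 *
 * @code
 * const struct rte_flow_item_ipv4 ipv4_spec = {
 *	.hdr = { .next_proto_id = IPPROTO_UDP },
 * };
 * const struct rte_flow_item_ipv4 ipv4_mask = {
 *	.hdr = { .next_proto_id = 0xff }, // full mask: 0 or 0xff only
 * };
 * const struct rte_flow_item item = {
 *	.type = RTE_FLOW_ITEM_TYPE_IPV4,
 *	.spec = &ipv4_spec,
 *	.mask = &ipv4_mask,
 * };
 * @endcode
 */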
2159 * Validate IPV6 item.
2162 * Item specification.
2163 * @param[in] item_flags
2164 * Bit-fields that hold the items detected until now.
2165 * @param[in] last_item
2166 * Previous validated item in the pattern items.
2167 * @param[in] ether_type
2168 * Type in the ethernet layer header (including dot1q).
2169 * @param[in] acc_mask
2170 * Acceptable mask; if NULL, the default internal mask
2171 * will be used to check whether item fields are supported.
2173 * Pointer to error structure.
2176 * 0 on success, a negative errno value otherwise and rte_errno is set.
2179 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2180 uint64_t item_flags,
2181 uint64_t last_item,
2182 uint16_t ether_type,
2183 const struct rte_flow_item_ipv6 *acc_mask,
2184 struct rte_flow_error *error)
2186 const struct rte_flow_item_ipv6 *mask = item->mask;
2187 const struct rte_flow_item_ipv6 *spec = item->spec;
2188 const struct rte_flow_item_ipv6 nic_mask = {
2189 .hdr = {
2190 .src_addr =
2191 "\xff\xff\xff\xff\xff\xff\xff\xff"
2192 "\xff\xff\xff\xff\xff\xff\xff\xff",
2193 .dst_addr =
2194 "\xff\xff\xff\xff\xff\xff\xff\xff"
2195 "\xff\xff\xff\xff\xff\xff\xff\xff",
2196 .vtc_flow = RTE_BE32(0xffffffff),
2197 .proto = 0xff,
2198 },
2199 };
2200 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2201 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2202 MLX5_FLOW_LAYER_OUTER_L3;
2203 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2204 MLX5_FLOW_LAYER_OUTER_L4;
2205 int ret;
2206 uint8_t next_proto = 0xFF;
2207 const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2208 MLX5_FLOW_LAYER_OUTER_VLAN |
2209 MLX5_FLOW_LAYER_INNER_VLAN);
2211 if ((last_item & l2_vlan) && ether_type &&
2212 ether_type != RTE_ETHER_TYPE_IPV6)
2213 return rte_flow_error_set(error, EINVAL,
2214 RTE_FLOW_ERROR_TYPE_ITEM, item,
2215 "IPv6 cannot follow L2/VLAN layer "
2216 "which ether type is not IPv6");
2217 if (mask && mask->hdr.proto == UINT8_MAX && spec)
2218 next_proto = spec->hdr.proto;
2219 if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2220 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2221 return rte_flow_error_set(error, EINVAL,
2222 RTE_FLOW_ERROR_TYPE_ITEM,
2223 item,
2224 "multiple tunnel "
2225 "not supported");
2226 }
2227 if (next_proto == IPPROTO_HOPOPTS ||
2228 next_proto == IPPROTO_ROUTING ||
2229 next_proto == IPPROTO_FRAGMENT ||
2230 next_proto == IPPROTO_ESP ||
2231 next_proto == IPPROTO_AH ||
2232 next_proto == IPPROTO_DSTOPTS)
2233 return rte_flow_error_set(error, EINVAL,
2234 RTE_FLOW_ERROR_TYPE_ITEM, item,
2235 "IPv6 proto (next header) should "
2236 "not be set as extension header");
2237 if (item_flags & MLX5_FLOW_LAYER_IPIP)
2238 return rte_flow_error_set(error, EINVAL,
2239 RTE_FLOW_ERROR_TYPE_ITEM, item,
2240 "wrong tunnel type - IPv4 specified "
2241 "but IPv6 item provided");
2242 if (item_flags & l3m)
2243 return rte_flow_error_set(error, ENOTSUP,
2244 RTE_FLOW_ERROR_TYPE_ITEM, item,
2245 "multiple L3 layers not supported");
2246 else if (item_flags & l4m)
2247 return rte_flow_error_set(error, EINVAL,
2248 RTE_FLOW_ERROR_TYPE_ITEM, item,
2249 "L3 cannot follow an L4 layer.");
2250 else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2251 !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2252 return rte_flow_error_set(error, EINVAL,
2253 RTE_FLOW_ERROR_TYPE_ITEM, item,
2254 "L3 cannot follow an NVGRE layer.");
2255 if (!mask)
2256 mask = &rte_flow_item_ipv6_mask;
2257 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2258 acc_mask ? (const uint8_t *)acc_mask
2259 : (const uint8_t *)&nic_mask,
2260 sizeof(struct rte_flow_item_ipv6),
2261 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2262 if (ret < 0)
2263 return ret;
2264 return 0;
2265 }
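/*
 * Editor's illustrative sketch (not part of the original file): proto can
 * be matched only for upper-layer protocols; extension header values
 * (HOPOPTS, ROUTING, FRAGMENT, ESP, AH, DSTOPTS) are rejected above.
 *
 * @code
 * const struct rte_flow_item_ipv6 ipv6_spec = {
 *	.hdr = { .proto = IPPROTO_TCP }, // IPPROTO_FRAGMENT would fail
 * };
 * const struct rte_flow_item_ipv6 ipv6_mask = {
 *	.hdr = { .proto = 0xff },
 * };
 * @endcode
 */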
2268 * Validate UDP item.
2271 * Item specification.
2272 * @param[in] item_flags
2273 * Bit-fields that hold the items detected until now.
2274 * @param[in] target_protocol
2275 * The next protocol in the previous item.
2276 * @param[in] flow_mask
2277 * mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2279 * Pointer to error structure.
2282 * 0 on success, a negative errno value otherwise and rte_errno is set.
2285 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2286 uint64_t item_flags,
2287 uint8_t target_protocol,
2288 struct rte_flow_error *error)
2290 const struct rte_flow_item_udp *mask = item->mask;
2291 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2292 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2293 MLX5_FLOW_LAYER_OUTER_L3;
2294 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2295 MLX5_FLOW_LAYER_OUTER_L4;
2296 int ret;
2298 if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2299 return rte_flow_error_set(error, EINVAL,
2300 RTE_FLOW_ERROR_TYPE_ITEM, item,
2301 "protocol filtering not compatible"
2303 if (!(item_flags & l3m))
2304 return rte_flow_error_set(error, EINVAL,
2305 RTE_FLOW_ERROR_TYPE_ITEM, item,
2306 "L3 is mandatory to filter on L4");
2307 if (item_flags & l4m)
2308 return rte_flow_error_set(error, EINVAL,
2309 RTE_FLOW_ERROR_TYPE_ITEM, item,
2310 "multiple L4 layers not supported");
2311 if (!mask)
2312 mask = &rte_flow_item_udp_mask;
2313 ret = mlx5_flow_item_acceptable
2314 (item, (const uint8_t *)mask,
2315 (const uint8_t *)&rte_flow_item_udp_mask,
2316 sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2317 error);
2318 if (ret < 0)
2319 return ret;
2320 return 0;
2321 }
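/*
 * Editor's illustrative sketch (not part of the original file): an L4 item
 * must be preceded by an L3 item in the same (outer or inner) part of the
 * pattern, so a minimal UDP pattern is eth / ipv4 / udp / end.
 *
 * @code
 * const struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *	{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */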
2324 * Validate TCP item.
2327 * Item specification.
2328 * @param[in] item_flags
2329 * Bit-fields that hold the items detected until now.
2330 * @param[in] target_protocol
2331 * The next protocol in the previous item.
2333 * Pointer to error structure.
2336 * 0 on success, a negative errno value otherwise and rte_errno is set.
2339 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2340 uint64_t item_flags,
2341 uint8_t target_protocol,
2342 const struct rte_flow_item_tcp *flow_mask,
2343 struct rte_flow_error *error)
2345 const struct rte_flow_item_tcp *mask = item->mask;
2346 const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2347 const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2348 MLX5_FLOW_LAYER_OUTER_L3;
2349 const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2350 MLX5_FLOW_LAYER_OUTER_L4;
2351 int ret;
2353 MLX5_ASSERT(flow_mask);
2354 if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2355 return rte_flow_error_set(error, EINVAL,
2356 RTE_FLOW_ERROR_TYPE_ITEM, item,
2357 "protocol filtering not compatible"
2359 if (!(item_flags & l3m))
2360 return rte_flow_error_set(error, EINVAL,
2361 RTE_FLOW_ERROR_TYPE_ITEM, item,
2362 "L3 is mandatory to filter on L4");
2363 if (item_flags & l4m)
2364 return rte_flow_error_set(error, EINVAL,
2365 RTE_FLOW_ERROR_TYPE_ITEM, item,
2366 "multiple L4 layers not supported");
2367 if (!mask)
2368 mask = &rte_flow_item_tcp_mask;
2369 ret = mlx5_flow_item_acceptable
2370 (item, (const uint8_t *)mask,
2371 (const uint8_t *)flow_mask,
2372 sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2373 error);
2374 if (ret < 0)
2375 return ret;
2376 return 0;
2377 }
2380 * Validate VXLAN item.
2383 * Item specification.
2384 * @param[in] item_flags
2385 * Bit-fields that hold the items detected until now.
2386 * @param[in] target_protocol
2387 * The next protocol in the previous item.
2389 * Pointer to error structure.
2392 * 0 on success, a negative errno value otherwise and rte_errno is set.
2395 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2396 uint64_t item_flags,
2397 struct rte_flow_error *error)
2399 const struct rte_flow_item_vxlan *spec = item->spec;
2400 const struct rte_flow_item_vxlan *mask = item->mask;
2401 int ret;
2402 union vni {
2403 uint32_t vlan_id;
2404 uint8_t vni[4];
2405 } id = { .vlan_id = 0, };
2408 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2409 return rte_flow_error_set(error, ENOTSUP,
2410 RTE_FLOW_ERROR_TYPE_ITEM, item,
2411 "multiple tunnel layers not"
2414 * Verify only UDPv4 is present as defined in
2415 * https://tools.ietf.org/html/rfc7348
2417 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2418 return rte_flow_error_set(error, EINVAL,
2419 RTE_FLOW_ERROR_TYPE_ITEM, item,
2420 "no outer UDP layer found");
2421 if (!mask)
2422 mask = &rte_flow_item_vxlan_mask;
2423 ret = mlx5_flow_item_acceptable
2424 (item, (const uint8_t *)mask,
2425 (const uint8_t *)&rte_flow_item_vxlan_mask,
2426 sizeof(struct rte_flow_item_vxlan),
2427 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2428 if (ret < 0)
2429 return ret;
2430 if (spec) {
2431 memcpy(&id.vni[1], spec->vni, 3);
2432 memcpy(&id.vni[1], mask->vni, 3);
2433 }
2434 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2435 return rte_flow_error_set(error, ENOTSUP,
2436 RTE_FLOW_ERROR_TYPE_ITEM, item,
2437 "VXLAN tunnel must be fully defined");
2442 * Validate VXLAN_GPE item.
2445 * Item specification.
2446 * @param[in] item_flags
2447 * Bit-fields that holds the items detected until now.
2449 * Pointer to the private data structure.
2450 * @param[in] target_protocol
2451 * The next protocol in the previous item.
2453 * Pointer to error structure.
2456 * 0 on success, a negative errno value otherwise and rte_errno is set.
2459 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2460 uint64_t item_flags,
2461 struct rte_eth_dev *dev,
2462 struct rte_flow_error *error)
2464 struct mlx5_priv *priv = dev->data->dev_private;
2465 const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2466 const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2467 int ret;
2468 union vni {
2469 uint32_t vlan_id;
2470 uint8_t vni[4];
2471 } id = { .vlan_id = 0, };
2473 if (!priv->config.l3_vxlan_en)
2474 return rte_flow_error_set(error, ENOTSUP,
2475 RTE_FLOW_ERROR_TYPE_ITEM, item,
2476 "L3 VXLAN is not enabled by device"
2477 " parameter and/or not configured in"
2479 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2480 return rte_flow_error_set(error, ENOTSUP,
2481 RTE_FLOW_ERROR_TYPE_ITEM, item,
2482 "multiple tunnel layers not"
2485 * Verify only UDPv4 is present as defined in
2486 * https://tools.ietf.org/html/rfc7348
2488 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2489 return rte_flow_error_set(error, EINVAL,
2490 RTE_FLOW_ERROR_TYPE_ITEM, item,
2491 "no outer UDP layer found");
2492 if (!mask)
2493 mask = &rte_flow_item_vxlan_gpe_mask;
2494 ret = mlx5_flow_item_acceptable
2495 (item, (const uint8_t *)mask,
2496 (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2497 sizeof(struct rte_flow_item_vxlan_gpe),
2498 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2499 if (ret < 0)
2500 return ret;
2501 if (spec) {
2502 if (spec->protocol)
2503 return rte_flow_error_set(error, ENOTSUP,
2504 RTE_FLOW_ERROR_TYPE_ITEM,
2505 item,
2506 "VxLAN-GPE protocol"
2507 " not supported");
2508 memcpy(&id.vni[1], spec->vni, 3);
2509 memcpy(&id.vni[1], mask->vni, 3);
2510 }
2511 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2512 return rte_flow_error_set(error, ENOTSUP,
2513 RTE_FLOW_ERROR_TYPE_ITEM, item,
2514 "VXLAN-GPE tunnel must be fully"
2519 * Validate GRE Key item.
2522 * Item specification.
2523 * @param[in] item_flags
2524 * Bit flags to mark detected items.
2525 * @param[in] gre_item
2526 * Pointer to the GRE item.
2528 * Pointer to error structure.
2531 * 0 on success, a negative errno value otherwise and rte_errno is set.
2534 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2535 uint64_t item_flags,
2536 const struct rte_flow_item *gre_item,
2537 struct rte_flow_error *error)
2539 const rte_be32_t *mask = item->mask;
2540 int ret = 0;
2541 rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2542 const struct rte_flow_item_gre *gre_spec;
2543 const struct rte_flow_item_gre *gre_mask;
2545 if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2546 return rte_flow_error_set(error, ENOTSUP,
2547 RTE_FLOW_ERROR_TYPE_ITEM, item,
2548 "Multiple GRE key not support");
2549 if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2550 return rte_flow_error_set(error, ENOTSUP,
2551 RTE_FLOW_ERROR_TYPE_ITEM, item,
2552 "No preceding GRE header");
2553 if (item_flags & MLX5_FLOW_LAYER_INNER)
2554 return rte_flow_error_set(error, ENOTSUP,
2555 RTE_FLOW_ERROR_TYPE_ITEM, item,
2556 "GRE key following a wrong item");
2557 gre_mask = gre_item->mask;
2558 if (!gre_mask)
2559 gre_mask = &rte_flow_item_gre_mask;
2560 gre_spec = gre_item->spec;
2561 if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2562 !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2563 return rte_flow_error_set(error, EINVAL,
2564 RTE_FLOW_ERROR_TYPE_ITEM, item,
2565 "Key bit must be on");
2567 if (!mask)
2568 mask = &gre_key_default_mask;
2569 ret = mlx5_flow_item_acceptable
2570 (item, (const uint8_t *)mask,
2571 (const uint8_t *)&gre_key_default_mask,
2572 sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2573 return ret;
2574 }
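/*
 * Editor's illustrative sketch (not part of the original file): a GRE_KEY
 * item is only valid right after a GRE item whose K (key present) bit is
 * set; RTE_BE16(0x2000) is that bit within c_rsvd0_ver.
 *
 * @code
 * const struct rte_flow_item_gre gre_spec = {
 *	.c_rsvd0_ver = RTE_BE16(0x2000), // K bit on
 * };
 * const rte_be32_t key_spec = RTE_BE32(0x12345678);
 * const struct rte_flow_item tail[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &key_spec },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */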
2577 * Validate GRE item.
2580 * Item specification.
2581 * @param[in] item_flags
2582 * Bit flags to mark detected items.
2583 * @param[in] target_protocol
2584 * The next protocol in the previous item.
2586 * Pointer to error structure.
2589 * 0 on success, a negative errno value otherwise and rte_errno is set.
2592 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2593 uint64_t item_flags,
2594 uint8_t target_protocol,
2595 struct rte_flow_error *error)
2597 const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2598 const struct rte_flow_item_gre *mask = item->mask;
2599 int ret;
2600 const struct rte_flow_item_gre nic_mask = {
2601 .c_rsvd0_ver = RTE_BE16(0xB000),
2602 .protocol = RTE_BE16(UINT16_MAX),
2605 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2606 return rte_flow_error_set(error, EINVAL,
2607 RTE_FLOW_ERROR_TYPE_ITEM, item,
2608 "protocol filtering not compatible"
2609 " with this GRE layer");
2610 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2611 return rte_flow_error_set(error, ENOTSUP,
2612 RTE_FLOW_ERROR_TYPE_ITEM, item,
2613 "multiple tunnel layers not"
2615 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2616 return rte_flow_error_set(error, ENOTSUP,
2617 RTE_FLOW_ERROR_TYPE_ITEM, item,
2618 "L3 Layer is missing");
2619 if (!mask)
2620 mask = &rte_flow_item_gre_mask;
2621 ret = mlx5_flow_item_acceptable
2622 (item, (const uint8_t *)mask,
2623 (const uint8_t *)&nic_mask,
2624 sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2625 error);
2626 if (ret < 0)
2627 return ret;
2628 #ifndef HAVE_MLX5DV_DR
2629 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2630 if (spec && (spec->protocol & mask->protocol))
2631 return rte_flow_error_set(error, ENOTSUP,
2632 RTE_FLOW_ERROR_TYPE_ITEM, item,
2633 "without MPLS support the"
2634 " specification cannot be used for"
2642 * Validate Geneve item.
2645 * Item specification.
2646 * @param[in] item_flags
2647 * Bit-fields that hold the items detected until now.
2649 * Pointer to the private data structure.
2651 * Pointer to error structure.
2654 * 0 on success, a negative errno value otherwise and rte_errno is set.
2658 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2659 uint64_t item_flags,
2660 struct rte_eth_dev *dev,
2661 struct rte_flow_error *error)
2663 struct mlx5_priv *priv = dev->data->dev_private;
2664 const struct rte_flow_item_geneve *spec = item->spec;
2665 const struct rte_flow_item_geneve *mask = item->mask;
2666 int ret;
2667 uint16_t gbhdr;
2668 uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2669 MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2670 const struct rte_flow_item_geneve nic_mask = {
2671 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2672 .vni = "\xff\xff\xff",
2673 .protocol = RTE_BE16(UINT16_MAX),
2676 if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2677 return rte_flow_error_set(error, ENOTSUP,
2678 RTE_FLOW_ERROR_TYPE_ITEM, item,
2679 "L3 Geneve is not enabled by device"
2680 " parameter and/or not configured in"
2682 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2683 return rte_flow_error_set(error, ENOTSUP,
2684 RTE_FLOW_ERROR_TYPE_ITEM, item,
2685 "multiple tunnel layers not"
2688 * Verify only UDPv4 is present as defined in
2689 * https://tools.ietf.org/html/rfc7348
2691 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2692 return rte_flow_error_set(error, EINVAL,
2693 RTE_FLOW_ERROR_TYPE_ITEM, item,
2694 "no outer UDP layer found");
2695 if (!mask)
2696 mask = &rte_flow_item_geneve_mask;
2697 ret = mlx5_flow_item_acceptable
2698 (item, (const uint8_t *)mask,
2699 (const uint8_t *)&nic_mask,
2700 sizeof(struct rte_flow_item_geneve),
2701 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2702 if (ret)
2703 return ret;
2704 if (spec) {
2705 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2706 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2707 MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2708 MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2709 return rte_flow_error_set(error, ENOTSUP,
2710 RTE_FLOW_ERROR_TYPE_ITEM,
2712 "Geneve protocol unsupported"
2713 " fields are being used");
2714 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2715 return rte_flow_error_set
2716 (error, ENOTSUP,
2717 RTE_FLOW_ERROR_TYPE_ITEM,
2718 item,
2719 "Unsupported Geneve options length");
2721 if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2722 return rte_flow_error_set
2723 (error, ENOTSUP,
2724 RTE_FLOW_ERROR_TYPE_ITEM, item,
2725 "Geneve tunnel must be fully defined");
2730 * Validate MPLS item.
2733 * Pointer to the rte_eth_dev structure.
2735 * Item specification.
2736 * @param[in] item_flags
2737 * Bit-fields that hold the items detected until now.
2738 * @param[in] prev_layer
2739 * The protocol layer indicated in the previous item.
2741 * Pointer to error structure.
2744 * 0 on success, a negative errno value otherwise and rte_errno is set.
2747 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2748 const struct rte_flow_item *item __rte_unused,
2749 uint64_t item_flags __rte_unused,
2750 uint64_t prev_layer __rte_unused,
2751 struct rte_flow_error *error)
2753 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2754 const struct rte_flow_item_mpls *mask = item->mask;
2755 struct mlx5_priv *priv = dev->data->dev_private;
2756 int ret;
2758 if (!priv->config.mpls_en)
2759 return rte_flow_error_set(error, ENOTSUP,
2760 RTE_FLOW_ERROR_TYPE_ITEM, item,
2761 "MPLS not supported or"
2762 " disabled in firmware"
2764 /* MPLS over IP, UDP, GRE is allowed */
2765 if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2766 MLX5_FLOW_LAYER_OUTER_L4_UDP |
2767 MLX5_FLOW_LAYER_GRE)))
2768 return rte_flow_error_set(error, EINVAL,
2769 RTE_FLOW_ERROR_TYPE_ITEM, item,
2770 "protocol filtering not compatible"
2771 " with MPLS layer");
2772 /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2773 if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2774 !(item_flags & MLX5_FLOW_LAYER_GRE))
2775 return rte_flow_error_set(error, ENOTSUP,
2776 RTE_FLOW_ERROR_TYPE_ITEM, item,
2777 "multiple tunnel layers not"
2780 mask = &rte_flow_item_mpls_mask;
2781 ret = mlx5_flow_item_acceptable
2782 (item, (const uint8_t *)mask,
2783 (const uint8_t *)&rte_flow_item_mpls_mask,
2784 sizeof(struct rte_flow_item_mpls),
2785 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2786 if (ret < 0)
2787 return ret;
2788 return 0;
2789 #else
2790 return rte_flow_error_set(error, ENOTSUP,
2791 RTE_FLOW_ERROR_TYPE_ITEM, item,
2792 "MPLS is not supported by Verbs, please"
2798 * Validate NVGRE item.
2801 * Item specification.
2802 * @param[in] item_flags
2803 * Bit flags to mark detected items.
2804 * @param[in] target_protocol
2805 * The next protocol in the previous item.
2807 * Pointer to error structure.
2810 * 0 on success, a negative errno value otherwise and rte_errno is set.
2813 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2814 uint64_t item_flags,
2815 uint8_t target_protocol,
2816 struct rte_flow_error *error)
2818 const struct rte_flow_item_nvgre *mask = item->mask;
2819 int ret;
2821 if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2822 return rte_flow_error_set(error, EINVAL,
2823 RTE_FLOW_ERROR_TYPE_ITEM, item,
2824 "protocol filtering not compatible"
2825 " with this GRE layer");
2826 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2827 return rte_flow_error_set(error, ENOTSUP,
2828 RTE_FLOW_ERROR_TYPE_ITEM, item,
2829 "multiple tunnel layers not"
2831 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2832 return rte_flow_error_set(error, ENOTSUP,
2833 RTE_FLOW_ERROR_TYPE_ITEM, item,
2834 "L3 Layer is missing");
2835 if (!mask)
2836 mask = &rte_flow_item_nvgre_mask;
2837 ret = mlx5_flow_item_acceptable
2838 (item, (const uint8_t *)mask,
2839 (const uint8_t *)&rte_flow_item_nvgre_mask,
2840 sizeof(struct rte_flow_item_nvgre),
2841 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2842 if (ret < 0)
2843 return ret;
2844 return 0;
2845 }
2848 * Validate eCPRI item.
2851 * Item specification.
2852 * @param[in] item_flags
2853 * Bit-fields that hold the items detected until now.
2854 * @param[in] last_item
2855 * Previous validated item in the pattern items.
2856 * @param[in] ether_type
2857 * Type in the ethernet layer header (including dot1q).
2858 * @param[in] acc_mask
2859 * Acceptable mask; if NULL, the default internal mask
2860 * will be used to check whether item fields are supported.
2862 * Pointer to error structure.
2865 * 0 on success, a negative errno value otherwise and rte_errno is set.
2868 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2869 uint64_t item_flags,
2870 uint64_t last_item,
2871 uint16_t ether_type,
2872 const struct rte_flow_item_ecpri *acc_mask,
2873 struct rte_flow_error *error)
2875 const struct rte_flow_item_ecpri *mask = item->mask;
2876 const struct rte_flow_item_ecpri nic_mask = {
2877 .hdr = {
2878 .common = {
2879 .u32 =
2880 RTE_BE32(((const struct rte_ecpri_common_hdr) {
2881 .type = 0xFF,
2882 }).u32),
2883 },
2884 .dummy[0] = 0xFFFFFFFF,
2885 },
2886 };
2887 const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2888 MLX5_FLOW_LAYER_OUTER_VLAN);
2889 struct rte_flow_item_ecpri mask_lo;
2891 if ((last_item & outer_l2_vlan) && ether_type &&
2892 ether_type != RTE_ETHER_TYPE_ECPRI)
2893 return rte_flow_error_set(error, EINVAL,
2894 RTE_FLOW_ERROR_TYPE_ITEM, item,
2895 "eCPRI cannot follow L2/VLAN layer "
2896 "which ether type is not 0xAEFE.");
2897 if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2898 return rte_flow_error_set(error, EINVAL,
2899 RTE_FLOW_ERROR_TYPE_ITEM, item,
2900 "eCPRI with tunnel is not supported "
2902 if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2903 return rte_flow_error_set(error, ENOTSUP,
2904 RTE_FLOW_ERROR_TYPE_ITEM, item,
2905 "multiple L3 layers not supported");
2906 else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2907 return rte_flow_error_set(error, EINVAL,
2908 RTE_FLOW_ERROR_TYPE_ITEM, item,
2909 "eCPRI cannot follow a TCP layer.");
2910 /* In the specification, eCPRI can be over a UDP layer. */
2911 else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2912 return rte_flow_error_set(error, EINVAL,
2913 RTE_FLOW_ERROR_TYPE_ITEM, item,
2914 "eCPRI over UDP layer is not yet "
2915 "supported right now.");
2916 /* Mask for type field in common header could be zero. */
2917 if (!mask)
2918 mask = &rte_flow_item_ecpri_mask;
2919 mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
2920 /* Input mask is in big-endian format. */
2921 if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
2922 return rte_flow_error_set(error, EINVAL,
2923 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2924 "partial mask is not supported "
2926 else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
2927 return rte_flow_error_set(error, EINVAL,
2928 RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2929 "message header mask must be after "
2931 return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2932 acc_mask ? (const uint8_t *)acc_mask
2933 : (const uint8_t *)&nic_mask,
2934 sizeof(struct rte_flow_item_ecpri),
2935 MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
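/*
 * Editor's illustrative sketch (not part of the original file): the message
 * type mask must be fully set before any message-body (dummy) mask is used,
 * mirroring the nic_mask idiom above.
 *
 * @code
 * const struct rte_flow_item_ecpri ecpri_mask = {
 *	.hdr = {
 *		.common = {
 *			.u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
 *				.type = 0xFF,
 *				}).u32),
 *		},
 *		.dummy[0] = 0xFFFFFFFF, // allowed once type mask is 0xff
 *	},
 * };
 * @endcode
 */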
2939 * Release resources related to the QUEUE/RSS action split.
2942 * Pointer to Ethernet device.
2944 * Flow to release IDs from.
2947 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2948 struct rte_flow *flow)
2950 struct mlx5_priv *priv = dev->data->dev_private;
2951 uint32_t handle_idx;
2952 struct mlx5_flow_handle *dev_handle;
2954 SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2955 handle_idx, dev_handle, next)
2956 if (dev_handle->split_flow_id)
2957 mlx5_ipool_free(priv->sh->ipool
2958 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
2959 dev_handle->split_flow_id);
2960 }
2963 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2964 const struct rte_flow_attr *attr __rte_unused,
2965 const struct rte_flow_item items[] __rte_unused,
2966 const struct rte_flow_action actions[] __rte_unused,
2967 bool external __rte_unused,
2968 int hairpin __rte_unused,
2969 struct rte_flow_error *error)
2971 return rte_flow_error_set(error, ENOTSUP,
2972 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2973 }
2975 static struct mlx5_flow *
2976 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2977 const struct rte_flow_attr *attr __rte_unused,
2978 const struct rte_flow_item items[] __rte_unused,
2979 const struct rte_flow_action actions[] __rte_unused,
2980 struct rte_flow_error *error)
2982 rte_flow_error_set(error, ENOTSUP,
2983 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2984 return NULL;
2985 }
2988 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2989 struct mlx5_flow *dev_flow __rte_unused,
2990 const struct rte_flow_attr *attr __rte_unused,
2991 const struct rte_flow_item items[] __rte_unused,
2992 const struct rte_flow_action actions[] __rte_unused,
2993 struct rte_flow_error *error)
2995 return rte_flow_error_set(error, ENOTSUP,
2996 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2997 }
3000 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3001 struct rte_flow *flow __rte_unused,
3002 struct rte_flow_error *error)
3004 return rte_flow_error_set(error, ENOTSUP,
3005 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3006 }
3009 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3010 struct rte_flow *flow __rte_unused)
3015 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3016 struct rte_flow *flow __rte_unused)
3021 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3022 struct rte_flow *flow __rte_unused,
3023 const struct rte_flow_action *actions __rte_unused,
3024 void *data __rte_unused,
3025 struct rte_flow_error *error)
3027 return rte_flow_error_set(error, ENOTSUP,
3028 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3029 }
3032 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3033 uint32_t domains __rte_unused,
3034 uint32_t flags __rte_unused)
3035 {
3036 return 0;
3037 }
3039 /* Void driver to protect from null pointer reference. */
3040 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3041 .validate = flow_null_validate,
3042 .prepare = flow_null_prepare,
3043 .translate = flow_null_translate,
3044 .apply = flow_null_apply,
3045 .remove = flow_null_remove,
3046 .destroy = flow_null_destroy,
3047 .query = flow_null_query,
3048 .sync_domain = flow_null_sync_domain,
3049 };
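/*
 * Editor's illustrative sketch (not part of the original file): since every
 * unsupported driver type maps to mlx5_flow_null_drv_ops, the dispatch
 * helpers never have to check for a NULL ops pointer.
 *
 * @code
 * const struct mlx5_flow_driver_ops *fops = flow_drv_ops[MLX5_FLOW_TYPE_MIN];
 * int rc = fops->validate(dev, attr, items, actions, false, 0, &error);
 * // rc is -rte_errno with ENOTSUP instead of a NULL-pointer crash
 * @endcode
 */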
3052 * Select flow driver type according to flow attributes and device
3053 * configuration.
3056 * Pointer to the dev structure.
3058 * Pointer to the flow attributes.
3061 * flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3063 static enum mlx5_flow_drv_type
3064 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3066 struct mlx5_priv *priv = dev->data->dev_private;
3067 /* The OS can determine a specific flow type (DV, VERBS) first. */
3068 enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3070 if (type != MLX5_FLOW_TYPE_MAX)
3071 return type;
3072 /* If no OS specific type - continue with DV/VERBS selection */
3073 if (attr->transfer && priv->config.dv_esw_en)
3074 type = MLX5_FLOW_TYPE_DV;
3075 if (!attr->transfer)
3076 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3077 MLX5_FLOW_TYPE_VERBS;
3078 return type;
3079 }
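/*
 * Editor's note (illustrative summary, not part of the original file):
 * assuming no OS-specific override, the selection reduces to:
 *
 *   attr->transfer && priv->config.dv_esw_en    -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && priv->config.dv_flow_en  -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && !priv->config.dv_flow_en -> MLX5_FLOW_TYPE_VERBS
 */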
3081 #define flow_get_drv_ops(type) flow_drv_ops[type]
3084 * Flow driver validation API. This abstracts calling driver specific functions.
3085 * The type of flow driver is determined according to flow attributes.
3088 * Pointer to the dev structure.
3090 * Pointer to the flow attributes.
3092 * Pointer to the list of items.
3093 * @param[in] actions
3094 * Pointer to the list of actions.
3095 * @param[in] external
3096 * This flow rule is created by a request external to the PMD.
3097 * @param[in] hairpin
3098 * Number of hairpin TX actions, 0 means classic flow.
3100 * Pointer to the error structure.
3103 * 0 on success, a negative errno value otherwise and rte_errno is set.
3106 flow_drv_validate(struct rte_eth_dev *dev,
3107 const struct rte_flow_attr *attr,
3108 const struct rte_flow_item items[],
3109 const struct rte_flow_action actions[],
3110 bool external, int hairpin, struct rte_flow_error *error)
3112 const struct mlx5_flow_driver_ops *fops;
3113 enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3115 fops = flow_get_drv_ops(type);
3116 return fops->validate(dev, attr, items, actions, external,
3117 hairpin, error);
3118 }
3121 * Flow driver preparation API. This abstracts calling driver specific
3122 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3123 * calculates the size of memory required for device flow, allocates the memory,
3124 * initializes the device flow and returns the pointer.
3127 * This function initializes a device flow structure such as dv or verbs in
3128 * struct mlx5_flow. However, it is the caller's responsibility to initialize
3129 * the rest. For example, adding the returned device flow to the
3130 * flow->dev_flow list and setting the backward reference to the flow should
3131 * be done outside of this function. The layers field is not filled either.
3134 * Pointer to the dev structure.
3136 * Pointer to the flow attributes.
3138 * Pointer to the list of items.
3139 * @param[in] actions
3140 * Pointer to the list of actions.
3141 * @param[in] flow_idx
3142 * The memory pool index of the flow.
3144 * Pointer to the error structure.
3147 * Pointer to device flow on success, otherwise NULL and rte_errno is set.
3149 static inline struct mlx5_flow *
3150 flow_drv_prepare(struct rte_eth_dev *dev,
3151 const struct rte_flow *flow,
3152 const struct rte_flow_attr *attr,
3153 const struct rte_flow_item items[],
3154 const struct rte_flow_action actions[],
3155 uint32_t flow_idx,
3156 struct rte_flow_error *error)
3158 const struct mlx5_flow_driver_ops *fops;
3159 enum mlx5_flow_drv_type type = flow->drv_type;
3160 struct mlx5_flow *mlx5_flow = NULL;
3162 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3163 fops = flow_get_drv_ops(type);
3164 mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3165 if (mlx5_flow)
3166 mlx5_flow->flow_idx = flow_idx;
3167 return mlx5_flow;
3168 }
3171 * Flow driver translation API. This abstracts calling driver specific
3172 * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3173 * translates a generic flow into a driver flow. flow_drv_prepare() must
3174 * precede.
3177 * dev_flow->layers could be filled as a result of parsing during translation
3178 * if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3179 * if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3180 * flow->actions could be overwritten even though all the expanded dev_flows
3181 * have the same actions.
3184 * Pointer to the rte dev structure.
3185 * @param[in, out] dev_flow
3186 * Pointer to the mlx5 flow.
3188 * Pointer to the flow attributes.
3190 * Pointer to the list of items.
3191 * @param[in] actions
3192 * Pointer to the list of actions.
3194 * Pointer to the error structure.
3197 * 0 on success, a negative errno value otherwise and rte_errno is set.
3200 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3201 const struct rte_flow_attr *attr,
3202 const struct rte_flow_item items[],
3203 const struct rte_flow_action actions[],
3204 struct rte_flow_error *error)
3206 const struct mlx5_flow_driver_ops *fops;
3207 enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3209 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3210 fops = flow_get_drv_ops(type);
3211 return fops->translate(dev, dev_flow, attr, items, actions, error);
3215 * Flow driver apply API. This abstracts calling driver specific functions.
3216 * Parent flow (rte_flow) should have driver type (drv_type). It applies
3217 * translated driver flows on to device. flow_drv_translate() must precede.
3220 * Pointer to Ethernet device structure.
3221 * @param[in, out] flow
3222 * Pointer to flow structure.
3224 * Pointer to error structure.
3227 * 0 on success, a negative errno value otherwise and rte_errno is set.
3230 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3231 struct rte_flow_error *error)
3233 const struct mlx5_flow_driver_ops *fops;
3234 enum mlx5_flow_drv_type type = flow->drv_type;
3236 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3237 fops = flow_get_drv_ops(type);
3238 return fops->apply(dev, flow, error);
3242 * Flow driver destroy API. This abstracts calling driver specific functions.
3243 * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3244 * on device and releases resources of the flow.
3247 * Pointer to Ethernet device.
3248 * @param[in, out] flow
3249 * Pointer to flow structure.
3252 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3254 const struct mlx5_flow_driver_ops *fops;
3255 enum mlx5_flow_drv_type type = flow->drv_type;
3257 flow_mreg_split_qrss_release(dev, flow);
3258 MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3259 fops = flow_get_drv_ops(type);
3260 fops->destroy(dev, flow);
3264 * Get RSS action from the action list.
3266 * @param[in] actions
3267 * Pointer to the list of actions.
3270 * Pointer to the RSS action if it exists, otherwise NULL.
3272 static const struct rte_flow_action_rss*
3273 flow_get_rss_action(const struct rte_flow_action actions[])
3275 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3276 switch (actions->type) {
3277 case RTE_FLOW_ACTION_TYPE_RSS:
3278 return (const struct rte_flow_action_rss *)
3279 actions->conf;
3280 default:
3281 break;
3282 }
3283 }
3284 return NULL;
3285 }
3287 /* Maps a shared action to a translated non-shared action in an actions array. */
3288 struct mlx5_translated_shared_action {
3289 struct rte_flow_shared_action *action; /**< Shared action */
3290 int index; /**< Index in related array of rte_flow_action */
3294 * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to related
3295 * non-shared actions if translation is possible.
3296 * This functionality is used to run the same execution path for both shared
3297 * and non-shared actions on flow create. All necessary preparations for
3298 * shared action handling should be performed on the *shared* actions list
3299 * returned from this function.
3301 * @param[in] actions
3302 * List of actions to translate.
3303 * @param[out] shared
3304 * List to store translated shared actions.
3305 * @param[in, out] shared_n
3306 * Size of *shared* array. On return should be updated with number of shared
3307 * actions retrieved from the *actions* list.
3308 * @param[out] translated_actions
3309 * List of actions where all shared actions were translated to non shared
3310 * if possible. NULL if no translation took place.
3312 * Pointer to the error structure.
3315 * 0 on success, a negative errno value otherwise and rte_errno is set.
3318 flow_shared_actions_translate(const struct rte_flow_action actions[],
3319 struct mlx5_translated_shared_action *shared,
3321 struct rte_flow_action **translated_actions,
3322 struct rte_flow_error *error)
3324 struct rte_flow_action *translated = NULL;
3325 size_t actions_size;
3328 struct mlx5_translated_shared_action *shared_end = NULL;
3330 for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3331 if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
3332 continue;
3333 if (copied_n == *shared_n) {
3334 return rte_flow_error_set
3335 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3336 NULL, "too many shared actions");
3338 rte_memcpy(&shared[copied_n].action, &actions[n].conf,
3339 sizeof(actions[n].conf));
3340 shared[copied_n].index = n;
3341 copied_n++;
3342 }
3343 n++;
3344 *shared_n = copied_n;
3345 if (!copied_n)
3346 return 0;
3347 actions_size = sizeof(struct rte_flow_action) * n;
3348 translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3349 if (!translated) {
3350 rte_errno = ENOMEM;
3351 return -ENOMEM;
3352 }
3353 memcpy(translated, actions, actions_size);
3354 for (shared_end = shared + copied_n; shared < shared_end; shared++) {
3355 const struct rte_flow_shared_action *shared_action;
3357 shared_action = shared->action;
3358 switch (shared_action->type) {
3359 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3360 translated[shared->index].type =
3361 RTE_FLOW_ACTION_TYPE_RSS;
3362 translated[shared->index].conf =
3363 &shared_action->rss.origin;
3364 break;
3365 default:
3366 mlx5_free(translated);
3367 return rte_flow_error_set
3368 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3369 NULL, "invalid shared action type");
3372 *translated_actions = translated;
3373 return 0;
3374 }
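/*
 * Editor's illustrative sketch (not part of the original file): an
 * application actions array with a shared RSS handle (as returned by
 * rte_flow_shared_action_create()) before and after translation.
 *
 * @code
 * struct rte_flow_action app_actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_SHARED, .conf = shared_handle },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * // After flow_shared_actions_translate():
 * //   translated[0].type == RTE_FLOW_ACTION_TYPE_RSS
 * //   translated[0].conf == &shared_action->rss.origin
 * @endcode
 */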
3377 * Get Shared RSS action from the action list.
3380 * Pointer to the list of actions.
3381 * @param[in] shared_n
3382 * Actions list length.
3385 * Pointer to the MLX5 RSS action if exists, otherwise return NULL.
3387 static struct mlx5_shared_action_rss *
3388 flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
3391 struct mlx5_translated_shared_action *shared_end;
3393 for (shared_end = shared + shared_n; shared < shared_end; shared++) {
3394 struct rte_flow_shared_action *shared_action;
3396 shared_action = shared->action;
3397 switch (shared_action->type) {
3398 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3399 __atomic_add_fetch(&shared_action->refcnt, 1,
3400 __ATOMIC_RELAXED);
3401 return &shared_action->rss;
3402 default:
3403 break;
3404 }
3405 }
3406 return NULL;
3407 }
3409 struct rte_flow_shared_action *
3410 mlx5_flow_get_shared_rss(struct rte_flow *flow)
3412 if (flow->shared_rss)
3413 return container_of(flow->shared_rss,
3414 struct rte_flow_shared_action, rss);
3415 else
3416 return NULL;
3417 }
3420 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3422 const struct rte_flow_item *item;
3423 unsigned int has_vlan = 0;
3425 for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3426 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3427 has_vlan = 1;
3428 break;
3429 }
3430 }
3431 if (has_vlan)
3432 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3433 MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3434 return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3435 MLX5_EXPANSION_ROOT_OUTER;
3436 }
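/*
 * Editor's illustrative sketch (not part of the original file): a pattern
 * containing a VLAN item with RSS level < 2 expands from the ETH_VLAN root.
 *
 * @code
 * const struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_VLAN },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * int root = find_graph_root(pattern, 0);
 * // root == MLX5_EXPANSION_ROOT_ETH_VLAN
 * @endcode
 */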
3439 * Get layer flags from the prefix flow.
3441 * Some flows may be split into several subflows; the prefix subflow gets the
3442 * match items and the suffix subflow gets the actions.
3443 * Some actions need the user-defined match item flags to get the detail for
3444 * the action.
3445 * This function helps the suffix flow to get the item layer flags from the
3446 * prefix subflow.
3448 * @param[in] dev_flow
3449 * Pointer to the created prefix subflow.
3452 * The layers obtained from the prefix subflow.
3454 static inline uint64_t
3455 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3457 uint64_t layers = 0;
3460 * Layer bits could be cached in a local variable, but usually the
3461 * compiler will do that optimization for us.
3462 * If no decap actions, use the layers directly.
3464 if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3465 return dev_flow->handle->layers;
3466 /* Convert L3 layers with decap action. */
3467 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3468 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3469 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3470 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3471 /* Convert L4 layers with decap action. */
3472 if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3473 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3474 else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3475 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3476 return layers;
3477 }
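/*
 * Editor's note (illustrative summary, not part of the original file): with
 * a decap action present, inner layer bits of the prefix subflow are
 * reported as outer bits for the suffix subflow:
 *
 *   INNER_L3_IPV4 -> OUTER_L3_IPV4    INNER_L4_TCP -> OUTER_L4_TCP
 *   INNER_L3_IPV6 -> OUTER_L3_IPV6    INNER_L4_UDP -> OUTER_L4_UDP
 */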
3480 * Get metadata split action information.
3482 * @param[in] actions
3483 * Pointer to the list of actions.
3484 * @param[out] qrss
3485 * Pointer to the return pointer.
3486 * @param[out] qrss_type
3487 * Pointer to the action type to return. RTE_FLOW_ACTION_TYPE_END is returned
3488 * if no QUEUE/RSS is found.
3489 * @param[out] encap_idx
3490 * Pointer to the index of the encap action if exists, otherwise the last
3491 * action index.
3494 * Total number of actions.
3497 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3498 const struct rte_flow_action **qrss,
3499 int *encap_idx)
3500 {
3501 const struct rte_flow_action_raw_encap *raw_encap;
3502 int actions_n = 0;
3503 int raw_decap_idx = -1;
3505 *encap_idx = -1;
3506 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3507 switch (actions->type) {
3508 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3509 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3510 *encap_idx = actions_n;
3511 break;
3512 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3513 raw_decap_idx = actions_n;
3514 break;
3515 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3516 raw_encap = actions->conf;
3517 if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3518 *encap_idx = raw_decap_idx != -1 ?
3519 raw_decap_idx : actions_n;
3520 break;
3521 case RTE_FLOW_ACTION_TYPE_QUEUE:
3522 case RTE_FLOW_ACTION_TYPE_RSS:
3523 *qrss = actions;
3524 break;
3525 default:
3526 break;
3527 }
3528 actions_n++;
3529 }
3530 if (*encap_idx == -1)
3531 *encap_idx = actions_n;
3532 /* Count RTE_FLOW_ACTION_TYPE_END. */
3533 return actions_n + 1;
3534 }
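/*
 * Editor's note (illustrative example, not part of the original file): for
 * the list raw_decap / raw_encap(size > MLX5_ENCAPSULATION_DECISION_SIZE) /
 * queue / end, *encap_idx lands on the raw_decap entry (index 0), *qrss
 * points at the queue entry, and the returned count (4) includes END.
 */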
3537 * Check meter action from the action list.
3539 * @param[in] actions
3540 * Pointer to the list of actions.
3541 * @param[out] mtr
3542 * Pointer to the meter exist flag.
3545 * Total number of actions.
3548 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
3549 {
3550 int actions_n = 0;
3552 MLX5_ASSERT(mtr);
3553 *mtr = 0;
3554 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3555 switch (actions->type) {
3556 case RTE_FLOW_ACTION_TYPE_METER:
3557 *mtr = 1;
3558 break;
3559 default:
3560 break;
3561 }
3562 actions_n++;
3563 }
3564 /* Count RTE_FLOW_ACTION_TYPE_END. */
3565 return actions_n + 1;
3566 }
3569 * Check if the flow should be split due to hairpin.
3570 * The reason for the split is that in current HW we can't
3571 * support encap and push-vlan on Rx, so if a flow contains
3572 * these actions we move it to Tx.
3575 * Pointer to Ethernet device.
3577 * Flow rule attributes.
3578 * @param[in] actions
3579 * Associated actions (list terminated by the END action).
3582 * > 0 the number of actions and the flow should be split,
3583 * 0 when no split required.
3586 flow_check_hairpin_split(struct rte_eth_dev *dev,
3587 const struct rte_flow_attr *attr,
3588 const struct rte_flow_action actions[])
3590 int queue_action = 0;
3591 int action_n = 0;
3592 int split = 0;
3593 const struct rte_flow_action_queue *queue;
3594 const struct rte_flow_action_rss *rss;
3595 const struct rte_flow_action_raw_encap *raw_encap;
3596 const struct rte_eth_hairpin_conf *conf;
3598 if (!attr->ingress)
3599 return 0;
3600 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3601 switch (actions->type) {
3602 case RTE_FLOW_ACTION_TYPE_QUEUE:
3603 queue = actions->conf;
3604 if (queue == NULL)
3605 return 0;
3606 conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3607 if (conf != NULL && !!conf->tx_explicit)
3608 return 0;
3609 queue_action = 1;
3610 action_n++;
3611 break;
3612 case RTE_FLOW_ACTION_TYPE_RSS:
3613 rss = actions->conf;
3614 if (rss == NULL || rss->queue_num == 0)
3615 return 0;
3616 conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3617 if (conf != NULL && !!conf->tx_explicit)
3618 return 0;
3619 queue_action = 1;
3620 action_n++;
3621 break;
3622 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3623 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3624 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3625 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3626 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3627 split++;
3628 action_n++;
3629 break;
3630 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3631 raw_encap = actions->conf;
3632 if (raw_encap->size >
3633 (sizeof(struct rte_flow_item_eth) +
3634 sizeof(struct rte_flow_item_ipv4)))
3635 split++;
3636 action_n++;
3637 break;
3638 default:
3639 action_n++;
3640 break;
3641 }
3642 }
3643 if (split && queue_action)
3644 return action_n;
3645 return 0;
3646 }
3648 /* Declare flow create/destroy prototype in advance. */
3650 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3651 const struct rte_flow_attr *attr,
3652 const struct rte_flow_item items[],
3653 const struct rte_flow_action actions[],
3654 bool external, struct rte_flow_error *error);
3657 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3660 struct mlx5_hlist_entry *
3661 flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
3664 struct rte_eth_dev *dev = list->ctx;
3665 struct mlx5_priv *priv = dev->data->dev_private;
3666 struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3667 struct mlx5_flow_mreg_copy_resource *mcp_res;
3668 struct rte_flow_error *error = ctx->error;
3671 uint32_t mark_id = key;
3672 struct rte_flow_attr attr = {
3673 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3676 struct mlx5_rte_flow_item_tag tag_spec = {
3679 struct rte_flow_item items[] = {
3680 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3682 struct rte_flow_action_mark ftag = {
3685 struct mlx5_flow_action_copy_mreg cp_mreg = {
3689 struct rte_flow_action_jump jump = {
3690 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3692 struct rte_flow_action actions[] = {
3693 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3696 /* Fill the register fields in the flow. */
3697 ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3698 if (ret < 0)
3699 return NULL;
3700 tag_spec.id = ret;
3701 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3702 if (ret < 0)
3703 return NULL;
3704 cp_mreg.src = ret;
3705 /* Provide the full width of FLAG specific value. */
3706 if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3707 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3708 /* Build a new flow. */
3709 if (mark_id != MLX5_DEFAULT_COPY_ID) {
3710 items[0] = (struct rte_flow_item){
3711 .type = (enum rte_flow_item_type)
3712 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3715 items[1] = (struct rte_flow_item){
3716 .type = RTE_FLOW_ITEM_TYPE_END,
3718 actions[0] = (struct rte_flow_action){
3719 .type = (enum rte_flow_action_type)
3720 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3723 actions[1] = (struct rte_flow_action){
3724 .type = (enum rte_flow_action_type)
3725 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3728 actions[2] = (struct rte_flow_action){
3729 .type = RTE_FLOW_ACTION_TYPE_JUMP,
3732 actions[3] = (struct rte_flow_action){
3733 .type = RTE_FLOW_ACTION_TYPE_END,
3736 /* Default rule, wildcard match. */
3737 attr.priority = MLX5_FLOW_PRIO_RSVD;
3738 items[0] = (struct rte_flow_item){
3739 .type = RTE_FLOW_ITEM_TYPE_END,
3741 actions[0] = (struct rte_flow_action){
3742 .type = (enum rte_flow_action_type)
3743 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3746 actions[1] = (struct rte_flow_action){
3747 .type = RTE_FLOW_ACTION_TYPE_JUMP,
3750 actions[2] = (struct rte_flow_action){
3751 .type = RTE_FLOW_ACTION_TYPE_END,
3754 /* Build a new entry. */
3755 mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3756 if (!mcp_res)
3757 return NULL;
3758 mcp_res->idx = idx;
3761 /*
3762 * The copy Flows are not included in any list. These
3763 * ones are referenced from other Flows and cannot
3764 * be applied, removed, or deleted in arbitrary order
3765 * by list traversing.
3767 mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3768 actions, false, error);
3769 if (!mcp_res->rix_flow) {
3770 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
3771 return NULL;
3772 }
3773 return &mcp_res->hlist_ent;
3774 }
3777 * Add a flow of copying flow metadata registers in RX_CP_TBL.
3779 * As mark_id is unique, if there's already a registered flow for the mark_id,
3780 * return by increasing the reference counter of the resource. Otherwise, create
3781 * the resource (mcp_res) and flow.
3784 * - If ingress port is ANY and reg_c[1] is mark_id,
3785 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3787 * For default flow (zero mark_id), flow is like,
3788 * - If ingress port is ANY,
3789 * reg_b := reg_c[0] and jump to RX_ACT_TBL.
3792 * Pointer to Ethernet device.
3794 * ID of MARK action, zero means default flow for META.
3796 * Perform verbose error reporting if not NULL.
3799 * Associated resource on success, NULL otherwise and rte_errno is set.
3801 static struct mlx5_flow_mreg_copy_resource *
3802 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3803 struct rte_flow_error *error)
3805 struct mlx5_priv *priv = dev->data->dev_private;
3806 struct mlx5_hlist_entry *entry;
3807 struct mlx5_flow_cb_ctx ctx = {
3812 /* Check if already registered. */
3813 MLX5_ASSERT(priv->mreg_cp_tbl);
3814 entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
3815 if (!entry)
3816 return NULL;
3817 return container_of(entry, struct mlx5_flow_mreg_copy_resource,
3818 hlist_ent);
3819 }
3822 flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
3824 struct mlx5_flow_mreg_copy_resource *mcp_res =
3825 container_of(entry, typeof(*mcp_res), hlist_ent);
3826 struct rte_eth_dev *dev = list->ctx;
3827 struct mlx5_priv *priv = dev->data->dev_private;
3829 MLX5_ASSERT(mcp_res->rix_flow);
3830 flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3831 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3835 * Release flow in RX_CP_TBL.
3838 * Pointer to Ethernet device.
3840 * Parent flow for which copying is provided.
3843 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3844 struct rte_flow *flow)
3846 struct mlx5_flow_mreg_copy_resource *mcp_res;
3847 struct mlx5_priv *priv = dev->data->dev_private;
3849 if (!flow->rix_mreg_copy)
3850 return;
3851 mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3852 flow->rix_mreg_copy);
3853 if (!mcp_res || !priv->mreg_cp_tbl)
3854 return;
3855 MLX5_ASSERT(mcp_res->rix_flow);
3856 mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3857 flow->rix_mreg_copy = 0;
3858 }
3861 * Remove the default copy action from RX_CP_TBL.
3863 * This function is called in mlx5_dev_start(). No thread safety
3864 * is guaranteed.
3867 * Pointer to Ethernet device.
3870 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3872 struct mlx5_hlist_entry *entry;
3873 struct mlx5_priv *priv = dev->data->dev_private;
3875 /* Check if default flow is registered. */
3876 if (!priv->mreg_cp_tbl)
3877 return;
3878 entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
3879 MLX5_DEFAULT_COPY_ID, NULL);
3880 if (!entry)
3881 return;
3882 mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
3883 }
3886 * Add the default copy action in RX_CP_TBL.
3888 * This function is called in mlx5_dev_start(). No thread safety
3889 * is guaranteed.
3892 * Pointer to Ethernet device.
3894 * Perform verbose error reporting if not NULL.
3897 * 0 for success, negative value otherwise and rte_errno is set.
3900 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3901 struct rte_flow_error *error)
3903 struct mlx5_priv *priv = dev->data->dev_private;
3904 struct mlx5_flow_mreg_copy_resource *mcp_res;
3906 /* Check whether extensive metadata feature is engaged. */
3907 if (!priv->config.dv_flow_en ||
3908 priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3909 !mlx5_flow_ext_mreg_supported(dev) ||
3910 !priv->sh->dv_regc0_mask)
3911 return 0;
3912 /*
3913 * Adding the default mreg copy flow may be called multiple times,
3914 * but it is removed only once in stop. Avoid registering it twice.
3916 if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
3917 return 0;
3918 mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3919 if (!mcp_res)
3920 return -rte_errno;
3921 return 0;
3922 }
3925 * Add a flow of copying flow metadata registers in RX_CP_TBL.
3927 * All the flow having Q/RSS action should be split by
3928 * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
3929 * performs the following,
3930 * - CQE->flow_tag := reg_c[1] (MARK)
3931 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3932 * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
3933 * but there should be a flow per each MARK ID set by MARK action.
3935 * For the aforementioned reason, if there's a MARK action in flow's action
3936 * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3937 * the MARK ID to CQE's flow_tag like,
3938 * - If reg_c[1] is mark_id,
3939 * flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3941 * For SET_META action which stores value in reg_c[0], as the destination is
3942 * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3943 * MARK ID means the default flow. The default flow looks like,
3944 * - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3947 * Pointer to Ethernet device.
3949 * Pointer to flow structure.
3950 * @param[in] actions
3951 * Pointer to the list of actions.
3953 * Perform verbose error reporting if not NULL.
3956 * 0 on success, negative value otherwise and rte_errno is set.
3959 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3960 struct rte_flow *flow,
3961 const struct rte_flow_action *actions,
3962 struct rte_flow_error *error)
3964 struct mlx5_priv *priv = dev->data->dev_private;
3965 struct mlx5_dev_config *config = &priv->config;
3966 struct mlx5_flow_mreg_copy_resource *mcp_res;
3967 const struct rte_flow_action_mark *mark;
3969 /* Check whether extensive metadata feature is engaged. */
3970 if (!config->dv_flow_en ||
3971 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3972 !mlx5_flow_ext_mreg_supported(dev) ||
3973 !priv->sh->dv_regc0_mask)
3974 return 0;
3975 /* Find MARK action. */
3976 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3977 switch (actions->type) {
3978 case RTE_FLOW_ACTION_TYPE_FLAG:
3979 mcp_res = flow_mreg_add_copy_action
3980 (dev, MLX5_FLOW_MARK_DEFAULT, error);
3981 if (!mcp_res)
3982 return -rte_errno;
3983 flow->rix_mreg_copy = mcp_res->idx;
3984 return 0;
3985 case RTE_FLOW_ACTION_TYPE_MARK:
3986 mark = (const struct rte_flow_action_mark *)
3987 actions->conf;
3988 mcp_res =
3989 flow_mreg_add_copy_action(dev, mark->id, error);
3990 if (!mcp_res)
3991 return -rte_errno;
3992 flow->rix_mreg_copy = mcp_res->idx;
3993 return 0;
3994 default:
3995 break;
3996 }
3997 }
3998 return 0;
3999 }
4001 #define MLX5_MAX_SPLIT_ACTIONS 24
4002 #define MLX5_MAX_SPLIT_ITEMS 24
4005 * Split the hairpin flow.
4006 * Since HW can't support encap and push-vlan on Rx, we move these
4007 * actions to Tx.
4008 * If the count action is after the encap then we also
4009 * move the count action. In this case the count will also measure
4010 * the outer bytes.
4013 * Pointer to Ethernet device.
4014 * @param[in] actions
4015 * Associated actions (list terminated by the END action).
4016 * @param[out] actions_rx
4017 * Rx flow actions.
4018 * @param[out] actions_tx
4019 * Tx flow actions.
4020 * @param[out] pattern_tx
4021 * The pattern items for the Tx flow.
4022 * @param[out] flow_id
4023 * The flow ID connected to this flow.
4029 flow_hairpin_split(struct rte_eth_dev *dev,
4030 const struct rte_flow_action actions[],
4031 struct rte_flow_action actions_rx[],
4032 struct rte_flow_action actions_tx[],
4033 struct rte_flow_item pattern_tx[],
4036 const struct rte_flow_action_raw_encap *raw_encap;
4037 const struct rte_flow_action_raw_decap *raw_decap;
4038 struct mlx5_rte_flow_action_set_tag *set_tag;
4039 struct rte_flow_action *tag_action;
4040 struct mlx5_rte_flow_item_tag *tag_item;
4041 struct rte_flow_item *item;
4042 char *addr;
4043 int encap = 0;
4045 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4046 switch (actions->type) {
4047 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4048 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4049 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4050 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4051 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4052 rte_memcpy(actions_tx, actions,
4053 sizeof(struct rte_flow_action));
4054 actions_tx++;
4055 break;
4056 case RTE_FLOW_ACTION_TYPE_COUNT:
4057 if (encap) {
4058 rte_memcpy(actions_tx, actions,
4059 sizeof(struct rte_flow_action));
4060 actions_tx++;
4061 } else {
4062 rte_memcpy(actions_rx, actions,
4063 sizeof(struct rte_flow_action));
4064 actions_rx++;
4065 }
4066 break;
4067 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4068 raw_encap = actions->conf;
4069 if (raw_encap->size >
4070 (sizeof(struct rte_flow_item_eth) +
4071 sizeof(struct rte_flow_item_ipv4))) {
4072 memcpy(actions_tx, actions,
4073 sizeof(struct rte_flow_action));
4074 actions_tx++;
4075 encap = 1;
4076 } else {
4077 rte_memcpy(actions_rx, actions,
4078 sizeof(struct rte_flow_action));
4079 actions_rx++;
4080 }
4081 break;
4082 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4083 raw_decap = actions->conf;
4084 if (raw_decap->size <
4085 (sizeof(struct rte_flow_item_eth) +
4086 sizeof(struct rte_flow_item_ipv4))) {
4087 memcpy(actions_tx, actions,
4088 sizeof(struct rte_flow_action));
4089 actions_tx++;
4090 } else {
4091 rte_memcpy(actions_rx, actions,
4092 sizeof(struct rte_flow_action));
4093 actions_rx++;
4094 }
4095 break;
4096 default:
4097 rte_memcpy(actions_rx, actions,
4098 sizeof(struct rte_flow_action));
4099 actions_rx++;
4100 break;
4101 }
4102 }
4103 /* Add set meta action and end action for the Rx flow. */
4104 tag_action = actions_rx;
4105 tag_action->type = (enum rte_flow_action_type)
4106 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4107 actions_rx++;
4108 rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4109 actions_rx++;
4110 set_tag = (void *)actions_rx;
4111 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
4112 MLX5_ASSERT(set_tag->id > REG_NON);
4113 set_tag->data = flow_id;
4114 tag_action->conf = set_tag;
4115 /* Create Tx item list. */
4116 rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4117 addr = (void *)&pattern_tx[2];
4118 item = pattern_tx;
4119 item->type = (enum rte_flow_item_type)
4120 MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4121 tag_item = (void *)addr;
4122 tag_item->data = flow_id;
4123 tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4124 MLX5_ASSERT(tag_item->id > REG_NON);
4125 item->spec = tag_item;
4126 addr += sizeof(struct mlx5_rte_flow_item_tag);
4127 tag_item = (void *)addr;
4128 tag_item->data = UINT32_MAX;
4129 tag_item->id = UINT16_MAX;
4130 item->mask = tag_item;
4131 item->last = NULL;
4132 item++;
4133 item->type = RTE_FLOW_ITEM_TYPE_END;
4134 return 0;
4135 }
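/*
 * Editor's note (illustrative summary, not part of the original file):
 * after the split, the Rx flow ends with an internal TAG action storing
 * flow_id in a metadata register, and the Tx pattern begins with the
 * matching internal TAG item:
 *
 *   Rx: <kept actions> / set_tag(reg := flow_id) / end
 *   Tx pattern: tag(reg == flow_id) / end, followed by the moved
 *   encap/push-vlan actions.
 */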
4138 union tunnel_offload_mark {
4139 uint32_t val;
4140 struct {
4141 uint32_t app_reserve:8;
4142 uint32_t table_id:15;
4143 uint32_t transfer:1;
4144 uint32_t _unused_:8;
4145 };
4146 };
4148 struct tunnel_default_miss_ctx {
4149 uint16_t *queue;
4150 __extension__
4151 union {
4152 struct rte_flow_action_rss action_rss;
4153 struct rte_flow_action_queue miss_queue;
4154 struct rte_flow_action_jump miss_jump;
4155 uint8_t raw[0];
4156 };
4157 };
4160 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
4161 struct rte_flow *flow,
4162 const struct rte_flow_attr *attr,
4163 const struct rte_flow_action *app_actions,
4165 struct tunnel_default_miss_ctx *ctx,
4166 struct rte_flow_error *error)
4168 struct mlx5_priv *priv = dev->data->dev_private;
4169 struct mlx5_flow *dev_flow;
4170 struct rte_flow_attr miss_attr = *attr;
4171 const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
4172 const struct rte_flow_item miss_items[2] = {
4173 {
4174 .type = RTE_FLOW_ITEM_TYPE_ETH,
4175 .spec = NULL,
4176 .last = NULL,
4177 .mask = NULL
4178 },
4179 {
4180 .type = RTE_FLOW_ITEM_TYPE_END,
4181 .spec = NULL,
4182 .last = NULL,
4183 .mask = NULL
4184 }
4185 };
4186 union tunnel_offload_mark mark_id;
4187 struct rte_flow_action_mark miss_mark;
4188 struct rte_flow_action miss_actions[3] = {
4189 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
4190 [2] = { .type = RTE_FLOW_ACTION_TYPE_END, .conf = NULL }
4192 const struct rte_flow_action_jump *jump_data;
4193 uint32_t i, flow_table = 0; /* prevent compilation warning */
4194 struct flow_grp_info grp_info = {
4195 .external = 1,
4196 .transfer = attr->transfer,
4197 .fdb_def_rule = !!priv->fdb_def_rule,
4198 .std_tbl_fix = 0,
4199 };
4200 int ret;
4202 if (!attr->transfer) {
4203 uint32_t q_size;
4205 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
4206 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
4207 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
4208 0, SOCKET_ID_ANY);
4209 if (!ctx->queue)
4210 return rte_flow_error_set
4211 (error, ENOMEM,
4212 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4213 NULL, "invalid default miss RSS");
4214 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4215 ctx->action_rss.level = 0;
4216 ctx->action_rss.types = priv->rss_conf.rss_hf;
4217 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
4218 ctx->action_rss.queue_num = priv->reta_idx_n;
4219 ctx->action_rss.key = priv->rss_conf.rss_key;
4220 ctx->action_rss.queue = ctx->queue;
4221 if (!priv->reta_idx_n || !priv->rxqs_n)
4222 return rte_flow_error_set
4224 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4225 NULL, "invalid port configuration");
4226 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
4227 ctx->action_rss.types = 0;
4228 for (i = 0; i != priv->reta_idx_n; ++i)
4229 ctx->queue[i] = (*priv->reta_idx)[i];
4231 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
4232 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
4234 miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
4235 for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
4236 jump_data = app_actions->conf;
4237 miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
4238 miss_attr.group = jump_data->group;
4239 ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
4240 &flow_table, grp_info, error);
4242 return rte_flow_error_set(error, EINVAL,
4243 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4244 NULL, "invalid tunnel id");
4245 mark_id.app_reserve = 0;
4246 mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
4247 mark_id.transfer = !!attr->transfer;
4248 mark_id._unused_ = 0;
4249 miss_mark.id = mark_id.val;
4250 dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
4251 miss_items, miss_actions, flow_idx, error);
4254 dev_flow->flow = flow;
4255 dev_flow->external = true;
4256 dev_flow->tunnel = tunnel;
4257 /* Subflow object was created, we must include it in the list. */
4258 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4259 dev_flow->handle, next);
4261 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
4262 dev->data->port_id, tunnel->app_tunnel.type,
4263 tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
4264 ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
4265 miss_actions, error);
4267 ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
4274 * The last stage of the splitting chain; it just creates the subflow
4275 * without any modification.
4278 * Pointer to Ethernet device.
4280 * Parent flow structure pointer.
4281 * @param[in, out] sub_flow
4282 * Pointer to return the created subflow, may be NULL.
4283 * @param[in] prefix_layers
4284 * Prefix subflow layers, may be 0.
4285 * @param[in] prefix_mark
4286 * Prefix subflow mark flag, may be 0.
4288 * Flow rule attributes.
4290 * Pattern specification (list terminated by the END pattern item).
4291 * @param[in] actions
4292 * Associated actions (list terminated by the END action).
4293 * @param[in] external
4294 * This flow rule is created by a request external to the PMD.
4295 * @param[in] flow_idx
4296 * The memory pool index of the flow.
4298 * Perform verbose error reporting if not NULL.
4300 * 0 on success, negative value otherwise
4303 flow_create_split_inner(struct rte_eth_dev *dev,
4304 struct rte_flow *flow,
4305 struct mlx5_flow **sub_flow,
4306 uint64_t prefix_layers,
4307 uint32_t prefix_mark,
4308 const struct rte_flow_attr *attr,
4309 const struct rte_flow_item items[],
4310 const struct rte_flow_action actions[],
4311 bool external, uint32_t flow_idx,
4312 struct rte_flow_error *error)
4314 struct mlx5_flow *dev_flow;
4316 dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4320 dev_flow->flow = flow;
4321 dev_flow->external = external;
4322 /* Subflow object was created, we must include it in the list. */
4323 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4324 dev_flow->handle, next);
4326 * If dev_flow is one of the suffix flows, some actions in the suffix
4327 * flow may need user-defined item layer flags; pass the metadata Rxq
4328 * mark flag to the suffix flow as well.
4331 dev_flow->handle->layers = prefix_layers;
4333 dev_flow->handle->mark = 1;
4335 *sub_flow = dev_flow;
4336 return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
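/**
 * Usage sketch (illustrative only): a splitter typically calls
 * flow_create_split_inner() once per subflow and propagates the prefix
 * layers/mark into the suffix, e.g.:
 *
 * @code
 * struct mlx5_flow *pre = NULL;
 *
 * ret = flow_create_split_inner(dev, flow, &pre, 0, 0, attr, items,
 *                               pre_actions, external, flow_idx, error);
 * if (!ret)
 *         ret = flow_create_split_inner(dev, flow, NULL,
 *                                       flow_get_prefix_layer_flags(pre),
 *                                       pre->handle->mark, &sfx_attr,
 *                                       sfx_items, sfx_actions, external,
 *                                       flow_idx, error);
 * @endcode
 */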
4340 * Split the meter flow.
4342 * As the meter flow will be split into three subflows, the actions
4343 * other than the meter action only make sense when the meter
4344 * accepts the packet. If it needs to be dropped, no other
4345 * additional actions should be taken.
4347 * One kind of special action which decapsulates the L3 tunnel
4348 * header will be put in the prefix subflow, so as not to take the
4349 * L3 tunnel header into account.
4352 * Pointer to Ethernet device.
4354 * Pattern specification (list terminated by the END pattern item).
4355 * @param[out] sfx_items
4356 * Suffix flow match items (list terminated by the END pattern item).
4357 * @param[in] actions
4358 * Associated actions (list terminated by the END action).
4359 * @param[out] actions_sfx
4360 * Suffix flow actions.
4361 * @param[out] actions_pre
4362 * Prefix flow actions.
4363 * @param[out] pattern_sfx
4364 * The pattern items for the suffix flow.
4365 * @param[out] tag_sfx
4366 * Pointer to suffix flow tag.
4372 flow_meter_split_prep(struct rte_eth_dev *dev,
4373 const struct rte_flow_item items[],
4374 struct rte_flow_item sfx_items[],
4375 const struct rte_flow_action actions[],
4376 struct rte_flow_action actions_sfx[],
4377 struct rte_flow_action actions_pre[])
4379 struct mlx5_priv *priv = dev->data->dev_private;
4380 struct rte_flow_action *tag_action = NULL;
4381 struct rte_flow_item *tag_item;
4382 struct mlx5_rte_flow_action_set_tag *set_tag;
4383 struct rte_flow_error error;
4384 const struct rte_flow_action_raw_encap *raw_encap;
4385 const struct rte_flow_action_raw_decap *raw_decap;
4386 struct mlx5_rte_flow_item_tag *tag_spec;
4387 struct mlx5_rte_flow_item_tag *tag_mask;
4388 uint32_t tag_id = 0;
4389 bool copy_vlan = false;
4391 /* Prepare the actions for prefix and suffix flow. */
4392 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4393 struct rte_flow_action **action_cur = NULL;
4395 switch (actions->type) {
4396 case RTE_FLOW_ACTION_TYPE_METER:
4397 /* Add the extra tag action first. */
4398 tag_action = actions_pre;
4399 tag_action->type = (enum rte_flow_action_type)
4400 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4402 action_cur = &actions_pre;
4404 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4405 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4406 action_cur = &actions_pre;
4408 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4409 raw_encap = actions->conf;
4410 if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4411 action_cur = &actions_pre;
4413 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4414 raw_decap = actions->conf;
4415 if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4416 action_cur = &actions_pre;
4418 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4419 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4426 action_cur = &actions_sfx;
4427 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
4430 /* Add end action to the actions. */
4431 actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4432 actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4435 set_tag = (void *)actions_pre;
4436 set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4437 mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4439 if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
4440 DRV_LOG(ERR, "Port %u meter flow id exceeds the max limit.",
4441 dev->data->port_id);
4442 mlx5_ipool_free(priv->sh->ipool
4443 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
4445 } else if (!tag_id) {
4448 set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
4450 tag_action->conf = set_tag;
4451 /* Prepare the suffix subflow items. */
4452 tag_item = sfx_items++;
4453 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4454 int item_type = items->type;
4456 switch (item_type) {
4457 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4458 memcpy(sfx_items, items, sizeof(*sfx_items));
4461 case RTE_FLOW_ITEM_TYPE_VLAN:
4463 memcpy(sfx_items, items, sizeof(*sfx_items));
4465 * Convert to internal match item, it is used
4466 * for vlan push and set vid.
4468 sfx_items->type = (enum rte_flow_item_type)
4469 MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4477 sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4479 tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4480 tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
4481 tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4482 tag_mask = tag_spec + 1;
4483 tag_mask->data = 0xffffff00;
4484 tag_item->type = (enum rte_flow_item_type)
4485 MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4486 tag_item->spec = tag_spec;
4487 tag_item->last = NULL;
4488 tag_item->mask = tag_mask;
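/**
 * Illustrative example (not compiled): for an application flow with
 * actions [METER, QUEUE, END], flow_meter_split_prep() would yield
 * approximately:
 *
 * @code
 * // Prefix actions (the tag is added first, then the meter):
 * //   [MLX5_RTE_FLOW_ACTION_TYPE_TAG(data = tag_id << MLX5_MTR_COLOR_BITS),
 * //    METER, END]
 * // Suffix actions: [QUEUE, END], matched in the suffix table by
 * //   [MLX5_RTE_FLOW_ITEM_TYPE_TAG(spec.data = tag_id << MLX5_MTR_COLOR_BITS,
 * //    mask.data = 0xffffff00), <copied PORT_ID/VLAN items>, END]
 * @endcode
 */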
4493 * Split action list having QUEUE/RSS for metadata register copy.
4495 * Once Q/RSS action is detected in user's action list, the flow action
4496 * should be split in order to copy metadata registers, which will happen in
4498 * - CQE->flow_tag := reg_c[1] (MARK)
4499 * - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4500 * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4501 * This is because the last action of each flow must be a terminal action
4502 * (QUEUE, RSS or DROP).
4504 * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4505 * stored and kept in the mlx5_flow structure for each sub_flow.
4507 * The Q/RSS action is replaced with,
4508 * - SET_TAG, setting the allocated flow ID to reg_c[2].
4509 * And the following JUMP action is added at the end,
4510 * - JUMP, to RX_CP_TBL.
4512 * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
4513 * flow_create_split_metadata() routine. The flow will look like,
4514 * - If flow ID matches (reg_c[2]), perform Q/RSS.
4517 * Pointer to Ethernet device.
4518 * @param[out] split_actions
4519 * Pointer to store split actions to jump to CP_TBL.
4520 * @param[in] actions
4521 * Pointer to the list of original flow actions.
4523 * Pointer to the Q/RSS action.
4524 * @param[in] actions_n
4525 * Number of original actions.
4527 * Perform verbose error reporting if not NULL.
4530 * non-zero unique flow_id on success, otherwise 0 and
4531 * error/rte_errno are set.
4534 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4535 struct rte_flow_action *split_actions,
4536 const struct rte_flow_action *actions,
4537 const struct rte_flow_action *qrss,
4538 int actions_n, struct rte_flow_error *error)
4540 struct mlx5_priv *priv = dev->data->dev_private;
4541 struct mlx5_rte_flow_action_set_tag *set_tag;
4542 struct rte_flow_action_jump *jump;
4543 const int qrss_idx = qrss - actions;
4544 uint32_t flow_id = 0;
4548 * Given actions will be split
4549 * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4550 * - Add jump to mreg CP_TBL.
4551 * As a result, there will be one more action.
4554 memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4555 set_tag = (void *)(split_actions + actions_n);
4557 * If the tag action is not set to void (meaning we are not the meter
4558 * suffix flow), add the tag action, since the meter suffix flow
4559 * already has the tag added.
4561 if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4563 * Allocate the new subflow ID. This one is unique within
4564 * device and not shared with representors. Otherwise,
4565 * we would have to resolve multi-thread access synch
4566 * issue. Each flow on the shared device is appended
4567 * with source vport identifier, so the resulting
4568 * flows will be unique in the shared (by master and
4569 * representors) domain even if they have coinciding
4572 mlx5_ipool_malloc(priv->sh->ipool
4573 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4575 return rte_flow_error_set(error, ENOMEM,
4576 RTE_FLOW_ERROR_TYPE_ACTION,
4577 NULL, "can't allocate id "
4578 "for split Q/RSS subflow");
4579 /* Internal SET_TAG action to set flow ID. */
4580 *set_tag = (struct mlx5_rte_flow_action_set_tag){
4583 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4587 /* Construct new actions array. */
4588 /* Replace QUEUE/RSS action. */
4589 split_actions[qrss_idx] = (struct rte_flow_action){
4590 .type = (enum rte_flow_action_type)
4591 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4595 /* JUMP action to jump to mreg copy table (CP_TBL). */
4596 jump = (void *)(set_tag + 1);
4597 *jump = (struct rte_flow_action_jump){
4598 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4600 split_actions[actions_n - 2] = (struct rte_flow_action){
4601 .type = RTE_FLOW_ACTION_TYPE_JUMP,
4604 split_actions[actions_n - 1] = (struct rte_flow_action){
4605 .type = RTE_FLOW_ACTION_TYPE_END,
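/**
 * Illustrative example (not compiled): with the original actions
 * [MARK, RSS, END], flow_mreg_split_qrss_prep() builds:
 *
 * @code
 * // split_actions = [MARK,
 * //                  MLX5_RTE_FLOW_ACTION_TYPE_TAG(reg_c[2] = flow_id),
 * //                  JUMP(group = MLX5_FLOW_MREG_CP_TABLE_GROUP),
 * //                  END]
 * // The RSS action itself is re-created later in RX_ACT_TBL by
 * // flow_create_split_metadata().
 * @endcode
 */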
4611 * Extend the given action list for Tx metadata copy.
4613 * Copy the given action list to the ext_actions and add flow metadata register
4614 * copy action in order to copy reg_a set by WQE to reg_c[0].
4616 * @param[out] ext_actions
4617 * Pointer to the extended action list.
4618 * @param[in] actions
4619 * Pointer to the list of actions.
4620 * @param[in] actions_n
4621 * Number of actions in the list.
4623 * Perform verbose error reporting if not NULL.
4624 * @param[in] encap_idx
4625 * The encap action index.
4628 * 0 on success, negative value otherwise
4631 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4632 struct rte_flow_action *ext_actions,
4633 const struct rte_flow_action *actions,
4634 int actions_n, struct rte_flow_error *error,
4637 struct mlx5_flow_action_copy_mreg *cp_mreg =
4638 (struct mlx5_flow_action_copy_mreg *)
4639 (ext_actions + actions_n + 1);
4642 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4646 ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4651 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4652 if (encap_idx == actions_n - 1) {
4653 ext_actions[actions_n - 1] = (struct rte_flow_action){
4654 .type = (enum rte_flow_action_type)
4655 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4658 ext_actions[actions_n] = (struct rte_flow_action){
4659 .type = RTE_FLOW_ACTION_TYPE_END,
4662 ext_actions[encap_idx] = (struct rte_flow_action){
4663 .type = (enum rte_flow_action_type)
4664 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4667 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4668 sizeof(*ext_actions) * (actions_n - encap_idx));
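/**
 * Illustrative example (not compiled): with Tx actions [RAW_ENCAP, END]
 * and encap_idx == 0, flow_mreg_tx_copy_prep() produces:
 *
 * @code
 * // ext_actions = [MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG(reg_a -> reg_c[0]),
 * //                RAW_ENCAP, END]
 * @endcode
 *
 * The copy action is placed before the encapsulation so that the metadata
 * is copied from the WQE before the packet headers are rewritten.
 */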
4674 * Check the match action from the action list.
4676 * @param[in] actions
4677 * Pointer to the list of actions.
4679 * Flow rule attributes.
4681 * The action to check for in the list.
4682 * @param[out] match_action_pos
4683 * Pointer to the position of the matched action if it exists, otherwise -1.
4684 * @param[out] qrss_action_pos
4685 * Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4688 * > 0 the total number of actions.
4689 * 0 if the match action is not found in the action list.
4692 flow_check_match_action(const struct rte_flow_action actions[],
4693 const struct rte_flow_attr *attr,
4694 enum rte_flow_action_type action,
4695 int *match_action_pos, int *qrss_action_pos)
4697 const struct rte_flow_action_sample *sample;
4704 *match_action_pos = -1;
4705 *qrss_action_pos = -1;
4706 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4707 if (actions->type == action) {
4709 *match_action_pos = actions_n;
4711 if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
4712 actions->type == RTE_FLOW_ACTION_TYPE_RSS)
4713 *qrss_action_pos = actions_n;
4714 if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
4716 if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
4717 sample = actions->conf;
4718 ratio = sample->ratio;
4719 sub_type = ((const struct rte_flow_action *)
4720 (sample->actions))->type;
4724 if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
4726 /* The JUMP action is not supported for mirroring;
4727 * mirroring supports multiple destinations.
4729 if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
4733 /* Count RTE_FLOW_ACTION_TYPE_END. */
4734 return flag ? actions_n + 1 : 0;
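/**
 * Usage sketch (illustrative only, mirrors flow_create_split_sample()):
 *
 * @code
 * int sample_pos = -1;
 * int qrss_pos = -1;
 * int actions_n = 0;
 *
 * if (priv->sampler_en)
 *         actions_n = flow_check_match_action(actions, attr,
 *                                             RTE_FLOW_ACTION_TYPE_SAMPLE,
 *                                             &sample_pos, &qrss_pos);
 * if (actions_n)
 *         ; // actions_n actions in total (incl. END), sample at sample_pos
 * @endcode
 */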
4737 #define SAMPLE_SUFFIX_ITEM 2
4740 * Split the sample flow.
4742 * As the sample flow will be split into two subflows, the sample
4743 * action stays in the prefix subflow while the other actions move to a new suffix flow.
4745 * A unique tag id is also added with a tag action in the sample flow;
4746 * the same tag id will be used as a match in the suffix flow.
4749 * Pointer to Ethernet device.
4751 * FDB egress flow flag.
4752 * @param[out] sfx_items
4753 * Suffix flow match items (list terminated by the END pattern item).
4754 * @param[in] actions
4755 * Associated actions (list terminated by the END action).
4756 * @param[out] actions_sfx
4757 * Suffix flow actions.
4758 * @param[out] actions_pre
4759 * Prefix flow actions.
4760 * @param[in] actions_n
4761 * The total number of actions.
4762 * @param[in] sample_action_pos
4763 * The sample action position.
4764 * @param[in] qrss_action_pos
4765 * The Queue/RSS action position.
4767 * Perform verbose error reporting if not NULL.
4770 * 0 or a unique flow_id on success, a negative errno value
4771 * otherwise and rte_errno is set.
4774 flow_sample_split_prep(struct rte_eth_dev *dev,
4776 struct rte_flow_item sfx_items[],
4777 const struct rte_flow_action actions[],
4778 struct rte_flow_action actions_sfx[],
4779 struct rte_flow_action actions_pre[],
4781 int sample_action_pos,
4782 int qrss_action_pos,
4783 struct rte_flow_error *error)
4785 struct mlx5_priv *priv = dev->data->dev_private;
4786 struct mlx5_rte_flow_action_set_tag *set_tag;
4787 struct mlx5_rte_flow_item_tag *tag_spec;
4788 struct mlx5_rte_flow_item_tag *tag_mask;
4789 uint32_t tag_id = 0;
4793 if (sample_action_pos < 0)
4794 return rte_flow_error_set(error, EINVAL,
4795 RTE_FLOW_ERROR_TYPE_ACTION,
4796 NULL, "invalid position of sample "
4799 /* Prepare the prefix tag action. */
4800 set_tag = (void *)(actions_pre + actions_n + 1);
4801 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4805 mlx5_ipool_malloc(priv->sh->ipool
4806 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4807 set_tag->data = tag_id;
4808 /* Prepare the suffix subflow items. */
4809 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4810 tag_spec->data = tag_id;
4811 tag_spec->id = set_tag->id;
4812 tag_mask = tag_spec + 1;
4813 tag_mask->data = UINT32_MAX;
4814 sfx_items[0] = (struct rte_flow_item){
4815 .type = (enum rte_flow_item_type)
4816 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4821 sfx_items[1] = (struct rte_flow_item){
4822 .type = (enum rte_flow_item_type)
4823 RTE_FLOW_ITEM_TYPE_END,
4826 /* Prepare the actions for prefix and suffix flow. */
4827 if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4828 index = qrss_action_pos;
4829 /* Put the actions preceding the Queue/RSS action into the prefix flow. */
4831 memcpy(actions_pre, actions,
4832 sizeof(struct rte_flow_action) * index);
4833 /* Put others preceding the sample action into prefix flow. */
4834 if (sample_action_pos > index + 1)
4835 memcpy(actions_pre + index, actions + index + 1,
4836 sizeof(struct rte_flow_action) *
4837 (sample_action_pos - index - 1));
4838 index = sample_action_pos - 1;
4839 /* Put Queue/RSS action into Suffix flow. */
4840 memcpy(actions_sfx, actions + qrss_action_pos,
4841 sizeof(struct rte_flow_action));
4844 index = sample_action_pos;
4846 memcpy(actions_pre, actions,
4847 sizeof(struct rte_flow_action) * index);
4849 /* Add the extra tag action for NIC-RX and E-Switch ingress. */
4851 actions_pre[index++] =
4852 (struct rte_flow_action){
4853 .type = (enum rte_flow_action_type)
4854 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4858 memcpy(actions_pre + index, actions + sample_action_pos,
4859 sizeof(struct rte_flow_action));
4861 actions_pre[index] = (struct rte_flow_action){
4862 .type = (enum rte_flow_action_type)
4863 RTE_FLOW_ACTION_TYPE_END,
4865 /* Put the actions after sample into Suffix flow. */
4866 memcpy(actions_sfx, actions + sample_action_pos + 1,
4867 sizeof(struct rte_flow_action) *
4868 (actions_n - sample_action_pos - 1));
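/**
 * Illustrative example (not compiled): for NIC Rx (fdb_tx == 0) with
 * actions [SAMPLE, QUEUE, END], flow_sample_split_prep() yields roughly:
 *
 * @code
 * // Prefix actions:
 * //   [MLX5_RTE_FLOW_ACTION_TYPE_TAG(data = tag_id), SAMPLE, END]
 * // Suffix actions: [QUEUE, END], matched by
 * //   [MLX5_RTE_FLOW_ITEM_TYPE_TAG(spec.data = tag_id,
 * //    mask.data = UINT32_MAX), END]
 * @endcode
 */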
4873 * The splitting for metadata feature.
4875 * - Q/RSS action on NIC Rx should be split in order to pass by
4876 * the mreg copy table (RX_CP_TBL) and then it jumps to the
4877 * action table (RX_ACT_TBL) which has the split Q/RSS action.
4879 * - All the actions on NIC Tx should have a mreg copy action to
4880 * copy reg_a from WQE to reg_c[0].
4883 * Pointer to Ethernet device.
4885 * Parent flow structure pointer.
4886 * @param[in] prefix_layers
4887 * Prefix flow layer flags.
4888 * @param[in] prefix_mark
4889 * Prefix subflow mark flag, may be 0.
4891 * Flow rule attributes.
4893 * Pattern specification (list terminated by the END pattern item).
4894 * @param[in] actions
4895 * Associated actions (list terminated by the END action).
4896 * @param[in] external
4897 * This flow rule is created by a request external to the PMD.
4898 * @param[in] flow_idx
4899 * The memory pool index of the flow.
4901 * Perform verbose error reporting if not NULL.
4903 * 0 on success, negative value otherwise
4906 flow_create_split_metadata(struct rte_eth_dev *dev,
4907 struct rte_flow *flow,
4908 uint64_t prefix_layers,
4909 uint32_t prefix_mark,
4910 const struct rte_flow_attr *attr,
4911 const struct rte_flow_item items[],
4912 const struct rte_flow_action actions[],
4913 bool external, uint32_t flow_idx,
4914 struct rte_flow_error *error)
4916 struct mlx5_priv *priv = dev->data->dev_private;
4917 struct mlx5_dev_config *config = &priv->config;
4918 const struct rte_flow_action *qrss = NULL;
4919 struct rte_flow_action *ext_actions = NULL;
4920 struct mlx5_flow *dev_flow = NULL;
4921 uint32_t qrss_id = 0;
4928 /* Check whether extensive metadata feature is engaged. */
4929 if (!config->dv_flow_en ||
4930 config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4931 !mlx5_flow_ext_mreg_supported(dev))
4932 return flow_create_split_inner(dev, flow, NULL, prefix_layers,
4933 prefix_mark, attr, items,
4934 actions, external, flow_idx,
4936 actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
4939 /* Exclude hairpin flows from splitting. */
4940 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
4941 const struct rte_flow_action_queue *queue;
4944 if (mlx5_rxq_get_type(dev, queue->index) ==
4945 MLX5_RXQ_TYPE_HAIRPIN)
4947 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
4948 const struct rte_flow_action_rss *rss;
4951 if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
4952 MLX5_RXQ_TYPE_HAIRPIN)
4957 /* Check if it is in meter suffix table. */
4958 mtr_sfx = attr->group == (attr->transfer ?
4959 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4960 MLX5_FLOW_TABLE_LEVEL_SUFFIX);
4962 * Q/RSS action on NIC Rx should be split in order to pass by
4963 * the mreg copy table (RX_CP_TBL) and then it jumps to the
4964 * action table (RX_ACT_TBL) which has the split Q/RSS action.
4966 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4967 sizeof(struct rte_flow_action_set_tag) +
4968 sizeof(struct rte_flow_action_jump);
4969 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4972 return rte_flow_error_set(error, ENOMEM,
4973 RTE_FLOW_ERROR_TYPE_ACTION,
4974 NULL, "no memory to split "
4977 * If we are the meter's suffix flow, the tag already exists.
4978 * Set the tag action to void.
4981 ext_actions[qrss - actions].type =
4982 RTE_FLOW_ACTION_TYPE_VOID;
4984 ext_actions[qrss - actions].type =
4985 (enum rte_flow_action_type)
4986 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4988 * Create the new actions list with the Q/RSS action removed and
4989 * a set tag action plus a jump to the register copy table
4990 * (RX_CP_TBL) appended. We should preallocate a unique tag ID here
4991 * in advance, because it is needed for the set tag action.
4993 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
4994 qrss, actions_n, error);
4995 if (!mtr_sfx && !qrss_id) {
4999 } else if (attr->egress && !attr->transfer) {
5001 * All the actions on NIC Tx should have a metadata register
5002 * copy action to copy reg_a from WQE to reg_c[meta]
5004 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5005 sizeof(struct mlx5_flow_action_copy_mreg);
5006 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5009 return rte_flow_error_set(error, ENOMEM,
5010 RTE_FLOW_ERROR_TYPE_ACTION,
5011 NULL, "no memory to split "
5013 /* Create the action list appended with copy register. */
5014 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
5015 actions_n, error, encap_idx);
5019 /* Add the unmodified original or prefix subflow. */
5020 ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
5022 items, ext_actions ? ext_actions :
5023 actions, external, flow_idx, error);
5026 MLX5_ASSERT(dev_flow);
5028 const struct rte_flow_attr q_attr = {
5029 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5032 /* Internal PMD action to set register. */
5033 struct mlx5_rte_flow_item_tag q_tag_spec = {
5037 struct rte_flow_item q_items[] = {
5039 .type = (enum rte_flow_item_type)
5040 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5041 .spec = &q_tag_spec,
5046 .type = RTE_FLOW_ITEM_TYPE_END,
5049 struct rte_flow_action q_actions[] = {
5055 .type = RTE_FLOW_ACTION_TYPE_END,
5058 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5061 * Configure the tag item only if there is no meter subflow.
5062 * Since tag is already marked in the meter suffix subflow
5063 * we can just use the meter suffix items as is.
5066 /* Not meter subflow. */
5067 MLX5_ASSERT(!mtr_sfx);
5069 * Put the unique id in the prefix flow, because it is destroyed
5070 * after the suffix flow and the id will be freed once there
5071 * are no actual flows with this id left and identifier
5072 * reallocation becomes possible (for example, for
5073 * other flows in other threads).
5075 dev_flow->handle->split_flow_id = qrss_id;
5076 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5080 q_tag_spec.id = ret;
5083 /* Add suffix subflow to execute Q/RSS. */
5084 ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
5085 &q_attr, mtr_sfx ? items :
5087 external, flow_idx, error);
5090 /* The qrss ID should be freed on failure. */
5092 MLX5_ASSERT(dev_flow);
5097 * We do not destroy the partially created sub_flows in case of error.
5098 * They are included in the parent flow list and will be destroyed
5099 * by flow_drv_destroy().
5101 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5103 mlx5_free(ext_actions);
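/**
 * Illustrative end-to-end example (not compiled): with extensive metadata
 * enabled, an application flow with actions [MARK, RSS, END] on NIC Rx
 * ends up as two subflows:
 *
 * @code
 * // 1. Prefix subflow in the original table:
 * //    [MARK, SET_TAG(reg_c[2] = qrss_id), JUMP(RX_CP_TBL), END]
 * // 2. Suffix subflow in RX_ACT_TBL (MLX5_FLOW_MREG_ACT_TABLE_GROUP),
 * //    matching TAG(reg_c[2]) == qrss_id:
 * //    [RSS, END]
 * @endcode
 */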
5108 * The splitting for meter feature.
5110 * - The meter flow will be split into two flows, prefix and
5111 * suffix flow. The packets only make sense if they pass the prefix
5114 * - Reg_C_5 is used for the packet to match between the prefix and
5118 * Pointer to Ethernet device.
5120 * Parent flow structure pointer.
5121 * @param[in] prefix_layers
5122 * Prefix subflow layers, may be 0.
5123 * @param[in] prefix_mark
5124 * Prefix subflow mark flag, may be 0.
5126 * Flow rule attributes.
5128 * Pattern specification (list terminated by the END pattern item).
5129 * @param[in] actions
5130 * Associated actions (list terminated by the END action).
5131 * @param[in] external
5132 * This flow rule is created by a request external to the PMD.
5133 * @param[in] flow_idx
5134 * The memory pool index of the flow.
5136 * Perform verbose error reporting if not NULL.
5138 * 0 on success, negative value otherwise
5141 flow_create_split_meter(struct rte_eth_dev *dev,
5142 struct rte_flow *flow,
5143 uint64_t prefix_layers,
5144 uint32_t prefix_mark,
5145 const struct rte_flow_attr *attr,
5146 const struct rte_flow_item items[],
5147 const struct rte_flow_action actions[],
5148 bool external, uint32_t flow_idx,
5149 struct rte_flow_error *error)
5151 struct mlx5_priv *priv = dev->data->dev_private;
5152 struct rte_flow_action *sfx_actions = NULL;
5153 struct rte_flow_action *pre_actions = NULL;
5154 struct rte_flow_item *sfx_items = NULL;
5155 struct mlx5_flow *dev_flow = NULL;
5156 struct rte_flow_attr sfx_attr = *attr;
5158 uint32_t mtr_tag_id = 0;
5165 actions_n = flow_check_meter_action(actions, &mtr);
5167 /* The five prefix actions: meter, decap, encap, tag, end. */
5168 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
5169 sizeof(struct mlx5_rte_flow_action_set_tag);
5170 /* tag, vlan, port id, end. */
5171 #define METER_SUFFIX_ITEM 4
5172 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5173 sizeof(struct mlx5_rte_flow_item_tag) * 2;
5174 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5177 return rte_flow_error_set(error, ENOMEM,
5178 RTE_FLOW_ERROR_TYPE_ACTION,
5179 NULL, "no memory to split "
5181 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5183 pre_actions = sfx_actions + actions_n;
5184 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
5185 actions, sfx_actions,
5191 /* Add the prefix subflow. */
5192 ret = flow_create_split_inner(dev, flow, &dev_flow,
5195 pre_actions, external,
5201 dev_flow->handle->split_flow_id = mtr_tag_id;
5202 /* Set the suffix group attribute. */
5203 sfx_attr.group = sfx_attr.transfer ?
5204 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5205 MLX5_FLOW_TABLE_LEVEL_SUFFIX;
5207 /* Add the suffix subflow. */
5208 ret = flow_create_split_metadata(dev, flow, dev_flow ?
5209 flow_get_prefix_layer_flags(dev_flow) :
5210 prefix_layers, dev_flow ?
5211 dev_flow->handle->mark : prefix_mark,
5212 &sfx_attr, sfx_items ?
5214 sfx_actions ? sfx_actions : actions,
5215 external, flow_idx, error);
5218 mlx5_free(sfx_actions);
5223 * The splitting for sample feature.
5225 * Once Sample action is detected in the action list, the flow actions should
5226 * be split into prefix sub flow and suffix sub flow.
5228 * The original items remain in the prefix sub flow; all actions preceding the
5229 * sample action and the sample action itself will be copied to the prefix
5230 * sub flow, while the actions following the sample action will be copied to the
5231 * suffix sub flow. The Queue action is always located in the suffix sub flow.
5233 * In order to make the packet from the prefix sub flow match the suffix sub
5234 * flow, an extra tag action is added into the prefix sub flow, and the suffix
5235 * sub flow uses a tag item with the unique flow id.
5238 * Pointer to Ethernet device.
5240 * Parent flow structure pointer.
5242 * Flow rule attributes.
5244 * Pattern specification (list terminated by the END pattern item).
5245 * @param[in] actions
5246 * Associated actions (list terminated by the END action).
5247 * @param[in] external
5248 * This flow rule is created by a request external to the PMD.
5249 * @param[in] flow_idx
5250 * The memory pool index of the flow.
5252 * Perform verbose error reporting if not NULL.
5254 * 0 on success, negative value otherwise
5257 flow_create_split_sample(struct rte_eth_dev *dev,
5258 struct rte_flow *flow,
5259 const struct rte_flow_attr *attr,
5260 const struct rte_flow_item items[],
5261 const struct rte_flow_action actions[],
5262 bool external, uint32_t flow_idx,
5263 struct rte_flow_error *error)
5265 struct mlx5_priv *priv = dev->data->dev_private;
5266 struct rte_flow_action *sfx_actions = NULL;
5267 struct rte_flow_action *pre_actions = NULL;
5268 struct rte_flow_item *sfx_items = NULL;
5269 struct mlx5_flow *dev_flow = NULL;
5270 struct rte_flow_attr sfx_attr = *attr;
5271 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5272 struct mlx5_flow_dv_sample_resource *sample_res;
5273 struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5274 struct mlx5_flow_tbl_resource *sfx_tbl;
5275 union mlx5_flow_tbl_key sfx_table_key;
5279 uint32_t fdb_tx = 0;
5282 int sample_action_pos;
5283 int qrss_action_pos;
5286 if (priv->sampler_en)
5287 actions_n = flow_check_match_action(actions, attr,
5288 RTE_FLOW_ACTION_TYPE_SAMPLE,
5289 &sample_action_pos, &qrss_action_pos);
5291 /* The prefix actions must include sample, tag, end. */
5292 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5293 + sizeof(struct mlx5_rte_flow_action_set_tag);
5294 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5295 sizeof(struct mlx5_rte_flow_item_tag) * 2;
5296 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5297 item_size), 0, SOCKET_ID_ANY);
5299 return rte_flow_error_set(error, ENOMEM,
5300 RTE_FLOW_ERROR_TYPE_ACTION,
5301 NULL, "no memory to split "
5303 /* The representor_id is -1 for uplink. */
5304 fdb_tx = (attr->transfer && priv->representor_id != -1);
5306 sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5308 pre_actions = sfx_actions + actions_n;
5309 tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
5310 actions, sfx_actions,
5311 pre_actions, actions_n,
5313 qrss_action_pos, error);
5314 if (tag_id < 0 || (!fdb_tx && !tag_id)) {
5318 /* Add the prefix subflow. */
5319 ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
5320 items, pre_actions, external,
5326 dev_flow->handle->split_flow_id = tag_id;
5327 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5328 /* Set the sfx group attr. */
5329 sample_res = (struct mlx5_flow_dv_sample_resource *)
5330 dev_flow->dv.sample_res;
5331 sfx_tbl = (struct mlx5_flow_tbl_resource *)
5332 sample_res->normal_path_tbl;
5333 sfx_tbl_data = container_of(sfx_tbl,
5334 struct mlx5_flow_tbl_data_entry, tbl);
5335 sfx_table_key.v64 = sfx_tbl_data->entry.key;
5336 sfx_attr.group = sfx_attr.transfer ?
5337 (sfx_table_key.table_id - 1) :
5338 sfx_table_key.table_id;
5341 /* Add the suffix subflow. */
5342 ret = flow_create_split_meter(dev, flow, dev_flow ?
5343 flow_get_prefix_layer_flags(dev_flow) : 0,
5344 dev_flow ? dev_flow->handle->mark : 0,
5345 &sfx_attr, sfx_items ? sfx_items : items,
5346 sfx_actions ? sfx_actions : actions,
5347 external, flow_idx, error);
5350 mlx5_free(sfx_actions);
5355 * Split the flow to subflow set. The splitters might be linked
5356 * in the chain, like this:
5357 * flow_create_split_outer() calls:
5358 * flow_create_split_meter() calls:
5359 * flow_create_split_metadata(meter_subflow_0) calls:
5360 * flow_create_split_inner(metadata_subflow_0)
5361 * flow_create_split_inner(metadata_subflow_1)
5362 * flow_create_split_inner(metadata_subflow_2)
5363 * flow_create_split_metadata(meter_subflow_1) calls:
5364 * flow_create_split_inner(metadata_subflow_0)
5365 * flow_create_split_inner(metadata_subflow_1)
5366 * flow_create_split_inner(metadata_subflow_2)
5368 * This provides a flexible way to add new levels of flow splitting.
5369 * All successfully created subflows are included in the
5370 * parent flow dev_flow list.
5373 * Pointer to Ethernet device.
5375 * Parent flow structure pointer.
5377 * Flow rule attributes.
5379 * Pattern specification (list terminated by the END pattern item).
5380 * @param[in] actions
5381 * Associated actions (list terminated by the END action).
5382 * @param[in] external
5383 * This flow rule is created by a request external to the PMD.
5384 * @param[in] flow_idx
5385 * The memory pool index of the flow.
5387 * Perform verbose error reporting if not NULL.
5389 * 0 on success, negative value otherwise
5392 flow_create_split_outer(struct rte_eth_dev *dev,
5393 struct rte_flow *flow,
5394 const struct rte_flow_attr *attr,
5395 const struct rte_flow_item items[],
5396 const struct rte_flow_action actions[],
5397 bool external, uint32_t flow_idx,
5398 struct rte_flow_error *error)
5402 ret = flow_create_split_sample(dev, flow, attr, items,
5403 actions, external, flow_idx, error);
5404 MLX5_ASSERT(ret <= 0);
5408 static struct mlx5_flow_tunnel *
5409 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5410 const struct rte_flow_attr *attr,
5411 const struct rte_flow_item items[],
5412 const struct rte_flow_action actions[])
5414 struct mlx5_flow_tunnel *tunnel;
5416 #pragma GCC diagnostic push
5417 #pragma GCC diagnostic ignored "-Wcast-qual"
5418 if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5419 tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5420 else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5421 tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5424 #pragma GCC diagnostic pop
5430 * Adjust flow RSS workspace if needed.
5433 * Pointer to thread flow work space.
5435 * Pointer to RSS descriptor.
5436 * @param[in] nrssq_num
5437 * New RSS queue number.
5440 * 0 on success, -1 otherwise and rte_errno is set.
5443 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5444 struct mlx5_flow_rss_desc *rss_desc,
5447 bool fidx = !!wks->flow_idx;
5449 if (likely(nrssq_num <= wks->rssq_num[fidx]))
5451 rss_desc->queue = realloc(rss_desc->queue,
5452 sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
5453 if (!rss_desc->queue) {
5457 wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
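/**
 * Usage sketch (illustrative only, mirrors flow_list_create()): the queue
 * array of the active RSS descriptor grows on demand and stays aligned to
 * an even number of entries:
 *
 * @code
 * struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
 * struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc[!!wks->flow_idx];
 *
 * if (rss && flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
 *         return 0; // rte_errno is set by the adjust helper
 * @endcode
 */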
5462 * Create a flow and add it to @p list.
5465 * Pointer to Ethernet device.
5467 * Pointer to a TAILQ flow list. If this parameter is NULL,
5468 * no list insertion occurs, the flow is just created and
5469 * it is the caller's responsibility to track the created flow.
5472 * Flow rule attributes.
5474 * Pattern specification (list terminated by the END pattern item).
5475 * @param[in] actions
5476 * Associated actions (list terminated by the END action).
5477 * @param[in] external
5478 * This flow rule is created by a request external to the PMD.
5480 * Perform verbose error reporting if not NULL.
5483 * A flow index on success, 0 otherwise and rte_errno is set.
5486 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5487 const struct rte_flow_attr *attr,
5488 const struct rte_flow_item items[],
5489 const struct rte_flow_action original_actions[],
5490 bool external, struct rte_flow_error *error)
5492 struct mlx5_priv *priv = dev->data->dev_private;
5493 struct rte_flow *flow = NULL;
5494 struct mlx5_flow *dev_flow;
5495 const struct rte_flow_action_rss *rss;
5496 struct mlx5_translated_shared_action
5497 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5498 int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5500 struct mlx5_flow_expand_rss buf;
5501 uint8_t buffer[2048];
5504 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5505 uint8_t buffer[2048];
5508 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5509 uint8_t buffer[2048];
5510 } actions_hairpin_tx;
5512 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5513 uint8_t buffer[2048];
5515 struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5516 struct mlx5_flow_rss_desc *rss_desc;
5517 const struct rte_flow_action *p_actions_rx;
5521 struct rte_flow_attr attr_tx = { .priority = 0 };
5522 struct rte_flow_attr attr_factor = {0};
5523 const struct rte_flow_action *actions;
5524 struct rte_flow_action *translated_actions = NULL;
5525 struct mlx5_flow_tunnel *tunnel;
5526 struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5527 struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5528 bool fidx = !!wks->flow_idx;
5532 rss_desc = &wks->rss_desc[fidx];
5533 ret = flow_shared_actions_translate(original_actions,
5536 &translated_actions, error);
5538 MLX5_ASSERT(translated_actions == NULL);
5541 actions = translated_actions ? translated_actions : original_actions;
5542 memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
5543 p_actions_rx = actions;
5544 hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
5545 ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
5546 external, hairpin_flow, error);
5548 goto error_before_hairpin_split;
5549 flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5552 goto error_before_hairpin_split;
5554 if (hairpin_flow > 0) {
5555 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5557 goto error_before_hairpin_split;
5559 flow_hairpin_split(dev, actions, actions_rx.actions,
5560 actions_hairpin_tx.actions, items_tx.items,
5562 p_actions_rx = actions_rx.actions;
5564 flow->drv_type = flow_get_drv_type(dev, &attr_factor);
5565 MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5566 flow->drv_type < MLX5_FLOW_TYPE_MAX);
5567 memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5568 rss = flow_get_rss_action(p_actions_rx);
5570 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5573 * The following information is required by
5574 * mlx5_flow_hashfields_adjust() in advance.
5576 rss_desc->level = rss->level;
5577 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5578 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5580 flow->dev_handles = 0;
5581 if (rss && rss->types) {
5582 unsigned int graph_root;
5584 graph_root = find_graph_root(items, rss->level);
5585 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5587 mlx5_support_expansion, graph_root);
5588 MLX5_ASSERT(ret > 0 &&
5589 (unsigned int)ret < sizeof(expand_buffer.buffer));
5592 buf->entry[0].pattern = (void *)(uintptr_t)items;
5594 flow->shared_rss = flow_get_shared_rss_action(shared_actions,
5597 * Record the start index when there is a nested call. All sub-flows
5598 * need to be translated before another call.
5599 * There is no need to use the ping-pong buffer to save memory here.
5602 MLX5_ASSERT(!wks->flow_nested_idx);
5603 wks->flow_nested_idx = fidx;
5605 for (i = 0; i < buf->entries; ++i) {
5607 * The splitter may create multiple dev_flows,
5608 * depending on configuration. In the simplest
5609 * case it just creates the unmodified original flow.
5611 ret = flow_create_split_outer(dev, flow, &attr_factor,
5612 buf->entry[i].pattern,
5613 p_actions_rx, external, idx,
5617 if (is_flow_tunnel_steer_rule(dev, attr,
5618 buf->entry[i].pattern,
5620 ret = flow_tunnel_add_default_miss(dev, flow, attr,
5626 mlx5_free(default_miss_ctx.queue);
5631 /* Create the tx flow. */
5633 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5634 attr_tx.ingress = 0;
5636 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5637 actions_hairpin_tx.actions,
5641 dev_flow->flow = flow;
5642 dev_flow->external = 0;
5643 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5644 dev_flow->handle, next);
5645 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5647 actions_hairpin_tx.actions, error);
5652 * Update the metadata register copy table. If extensive
5653 * metadata feature is enabled and registers are supported
5654 * we might create the extra rte_flow for each unique
5655 * MARK/FLAG action ID.
5657 * The table is updated for ingress flows only, because
5658 * the egress flows belong to a different device and the
5659 * copy table should be updated in the peer NIC Rx domain.
5661 if (attr_factor.ingress &&
5662 (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5663 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5668 * If the flow is external (from application) OR device is started, then
5669 * the flow will be applied immediately.
5671 if (external || dev->data->dev_started) {
5672 ret = flow_drv_apply(dev, flow, error);
5677 rte_spinlock_lock(&priv->flow_list_lock);
5678 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5680 rte_spinlock_unlock(&priv->flow_list_lock);
5682 flow_rxq_flags_set(dev, flow);
5683 rte_free(translated_actions);
5684 /* Nested flow creation index recovery. */
5685 wks->flow_idx = wks->flow_nested_idx;
5686 if (wks->flow_nested_idx)
5687 wks->flow_nested_idx = 0;
5688 tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5691 flow->tunnel_id = tunnel->tunnel_id;
5692 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5693 mlx5_free(default_miss_ctx.queue);
5698 ret = rte_errno; /* Save rte_errno before cleanup. */
5699 flow_mreg_del_copy_action(dev, flow);
5700 flow_drv_destroy(dev, flow);
5701 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5702 rte_errno = ret; /* Restore rte_errno. */
5705 wks->flow_idx = wks->flow_nested_idx;
5706 if (wks->flow_nested_idx)
5707 wks->flow_nested_idx = 0;
5708 error_before_hairpin_split:
5709 rte_free(translated_actions);
5714 * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5715 * incoming packets to table 1.
5717 * Other flow rules, requested for group n, will be created in
5718 * e-switch table n+1.
5719 * Jump action to e-switch group n will be created to group n+1.
5721 * Used when working in switchdev mode, to utilise advantages of table 1
5725 * Pointer to Ethernet device.
5728 * Pointer to flow on success, NULL otherwise and rte_errno is set.
5731 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5733 const struct rte_flow_attr attr = {
5740 const struct rte_flow_item pattern = {
5741 .type = RTE_FLOW_ITEM_TYPE_END,
5743 struct rte_flow_action_jump jump = {
5746 const struct rte_flow_action actions[] = {
5748 .type = RTE_FLOW_ACTION_TYPE_JUMP,
5752 .type = RTE_FLOW_ACTION_TYPE_END,
5755 struct mlx5_priv *priv = dev->data->dev_private;
5756 struct rte_flow_error error;
5758 return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5760 actions, false, &error);
5764 * Validate a flow supported by the NIC.
5766 * @see rte_flow_validate()
5770 mlx5_flow_validate(struct rte_eth_dev *dev,
5771 const struct rte_flow_attr *attr,
5772 const struct rte_flow_item items[],
5773 const struct rte_flow_action original_actions[],
5774 struct rte_flow_error *error)
5777 struct mlx5_translated_shared_action
5778 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5779 int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5780 const struct rte_flow_action *actions;
5781 struct rte_flow_action *translated_actions = NULL;
5782 int ret = flow_shared_actions_translate(original_actions,
5785 &translated_actions, error);
5789 actions = translated_actions ? translated_actions : original_actions;
5790 hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5791 ret = flow_drv_validate(dev, attr, items, actions,
5792 true, hairpin_flow, error);
5793 rte_free(translated_actions);
5800 * @see rte_flow_create()
5804 mlx5_flow_create(struct rte_eth_dev *dev,
5805 const struct rte_flow_attr *attr,
5806 const struct rte_flow_item items[],
5807 const struct rte_flow_action actions[],
5808 struct rte_flow_error *error)
5810 struct mlx5_priv *priv = dev->data->dev_private;
5813 * If the device is not started yet, it is not allowed to create a
5814 * flow from the application. PMD default flows and traffic control flows are not affected.
5817 if (unlikely(!dev->data->dev_started)) {
5818 DRV_LOG(DEBUG, "port %u is not started when "
5819 "inserting a flow", dev->data->port_id);
5820 rte_flow_error_set(error, ENODEV,
5821 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5823 "port not started");
5827 return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
5828 attr, items, actions, true, error);
5832 * Destroy a flow in a list.
5835 * Pointer to Ethernet device.
5837 * Pointer to the indexed flow list. If this parameter is NULL,
5838 * there is no flow removal from the list. Note that as a
5839 * flow is added to the indexed list, the memory the indexed
5840 * list points to may change as flows are destroyed.
5841 * @param[in] flow_idx
5842 * Index of flow to destroy.
5845 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
5848 struct mlx5_priv *priv = dev->data->dev_private;
5849 struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5850 struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
5851 [MLX5_IPOOL_RTE_FLOW], flow_idx);
5856 * Update RX queue flags only if port is started, otherwise it is
5859 if (dev->data->dev_started)
5860 flow_rxq_flags_trim(dev, flow);
5861 flow_drv_destroy(dev, flow);
5863 rte_spinlock_lock(&priv->flow_list_lock);
5864 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
5865 flow_idx, flow, next);
5866 rte_spinlock_unlock(&priv->flow_list_lock);
5868 flow_mreg_del_copy_action(dev, flow);
5870 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
5871 if (priv_fdir_flow->rix_flow == flow_idx)
5874 if (priv_fdir_flow) {
5875 LIST_REMOVE(priv_fdir_flow, next);
5876 mlx5_free(priv_fdir_flow->fdir);
5877 mlx5_free(priv_fdir_flow);
5880 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
5882 struct mlx5_flow_tunnel *tunnel;
5883 tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
5885 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
5886 mlx5_flow_tunnel_free(dev, tunnel);
5891 * Destroy all flows.
5894 * Pointer to Ethernet device.
5896 * Pointer to the Indexed flow list.
5898 * If flushing is called actively.
5901 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
5903 uint32_t num_flushed = 0;
5906 flow_list_destroy(dev, list, *list);
5910 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
5911 dev->data->port_id, num_flushed);
5916 * Stop all default actions for flows.
5919 * Pointer to Ethernet device.
5922 mlx5_flow_stop_default(struct rte_eth_dev *dev)
5924 flow_mreg_del_default_copy_action(dev);
5925 flow_rxq_flags_clear(dev);
5929 * Start all default actions for flows.
5932 * Pointer to Ethernet device.
5934 * 0 on success, a negative errno value otherwise and rte_errno is set.
5937 mlx5_flow_start_default(struct rte_eth_dev *dev)
5939 struct rte_flow_error error;
5941 /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
5942 return flow_mreg_add_default_copy_action(dev, &error);
5946 * Release key of thread specific flow workspace data.
5949 flow_release_workspace(void *data)
5951 struct mlx5_flow_workspace *wks = data;
5955 free(wks->rss_desc[0].queue);
5956 free(wks->rss_desc[1].queue);
5961 * Initialize key of thread specific flow workspace data.
5964 flow_alloc_workspace(void)
5966 if (pthread_key_create(&key_workspace, flow_release_workspace))
5967 DRV_LOG(ERR, "Can't create flow workspace data thread key.");
5971 * Get thread specific flow workspace.
5973 * @return pointer to thread-specific flow workspace data, NULL on error.
5975 struct mlx5_flow_workspace*
5976 mlx5_flow_get_thread_workspace(void)
5978 struct mlx5_flow_workspace *data;
5980 if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
5981 DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
5984 data = pthread_getspecific(key_workspace);
5986 data = calloc(1, sizeof(*data));
5988 DRV_LOG(ERR, "Failed to allocate flow workspace "
5992 data->rss_desc[0].queue = calloc(1,
5993 sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5994 if (!data->rss_desc[0].queue)
5996 data->rss_desc[1].queue = calloc(1,
5997 sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5998 if (!data->rss_desc[1].queue)
6000 data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
6001 data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
6002 if (pthread_setspecific(key_workspace, data)) {
6003 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6009 if (data->rss_desc[0].queue)
6010 free(data->rss_desc[0].queue);
6011 if (data->rss_desc[1].queue)
6012 free(data->rss_desc[1].queue);
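/**
 * Illustrative sketch (not compiled) of the ping-pong buffer selection in
 * the per-thread workspace: index 0 serves the outer flow, index 1 a
 * nested flow created while the outer one is being translated.
 *
 * @code
 * bool fidx = !!wks->flow_idx;            // which rss_desc is in use
 * struct mlx5_flow_rss_desc *rss_desc = &wks->rss_desc[fidx];
 * // ... create the flow using rss_desc ...
 * wks->flow_idx = wks->flow_nested_idx;   // restore on exit
 * @endcode
 */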
6018 * Verify the flow list is empty.
6021 * Pointer to Ethernet device.
6023 * @return the number of flows not released.
6026 mlx5_flow_verify(struct rte_eth_dev *dev)
6028 struct mlx5_priv *priv = dev->data->dev_private;
6029 struct rte_flow *flow;
6033 ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6035 DRV_LOG(DEBUG, "port %u flow %p still referenced",
6036 dev->data->port_id, (void *)flow);
6043 * Enable default hairpin egress flow.
6046 * Pointer to Ethernet device.
6051 * 0 on success, a negative errno value otherwise and rte_errno is set.
6054 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6057 struct mlx5_priv *priv = dev->data->dev_private;
6058 const struct rte_flow_attr attr = {
6062 struct mlx5_rte_flow_item_tx_queue queue_spec = {
6065 struct mlx5_rte_flow_item_tx_queue queue_mask = {
6066 .queue = UINT32_MAX,
6068 struct rte_flow_item items[] = {
6070 .type = (enum rte_flow_item_type)
6071 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6072 .spec = &queue_spec,
6074 .mask = &queue_mask,
6077 .type = RTE_FLOW_ITEM_TYPE_END,
6080 struct rte_flow_action_jump jump = {
6081 .group = MLX5_HAIRPIN_TX_TABLE,
6083 struct rte_flow_action actions[2];
6085 struct rte_flow_error error;
6087 actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6088 actions[0].conf = &jump;
6089 actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6090 flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6091 &attr, items, actions, false, &error);
6094 "Failed to create ctrl flow: rte_errno(%d),"
6095 " type(%d), message(%s)",
6096 rte_errno, error.type,
6097 error.message ? error.message : " (no stated reason)");
6104 * Enable a control flow configured from the control plane.
6107 * Pointer to Ethernet device.
6109 * An Ethernet flow spec to apply.
6111 * An Ethernet flow mask to apply.
6113 * A VLAN flow spec to apply.
6115 * A VLAN flow mask to apply.
6118 * 0 on success, a negative errno value otherwise and rte_errno is set.
6121 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6122 struct rte_flow_item_eth *eth_spec,
6123 struct rte_flow_item_eth *eth_mask,
6124 struct rte_flow_item_vlan *vlan_spec,
6125 struct rte_flow_item_vlan *vlan_mask)
6127 struct mlx5_priv *priv = dev->data->dev_private;
6128 const struct rte_flow_attr attr = {
6130 .priority = MLX5_FLOW_PRIO_RSVD,
6132 struct rte_flow_item items[] = {
6134 .type = RTE_FLOW_ITEM_TYPE_ETH,
6140 .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6141 RTE_FLOW_ITEM_TYPE_END,
6147 .type = RTE_FLOW_ITEM_TYPE_END,
6150 uint16_t queue[priv->reta_idx_n];
6151 struct rte_flow_action_rss action_rss = {
6152 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6154 .types = priv->rss_conf.rss_hf,
6155 .key_len = priv->rss_conf.rss_key_len,
6156 .queue_num = priv->reta_idx_n,
6157 .key = priv->rss_conf.rss_key,
6160 struct rte_flow_action actions[] = {
6162 .type = RTE_FLOW_ACTION_TYPE_RSS,
6163 .conf = &action_rss,
6166 .type = RTE_FLOW_ACTION_TYPE_END,
6170 struct rte_flow_error error;
6173 if (!priv->reta_idx_n || !priv->rxqs_n) {
6176 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6177 action_rss.types = 0;
6178 for (i = 0; i != priv->reta_idx_n; ++i)
6179 queue[i] = (*priv->reta_idx)[i];
6180 flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6181 &attr, items, actions, false, &error);
6188 * Enable a control flow configured from the control plane.
6191 * Pointer to Ethernet device.
6193 * An Ethernet flow spec to apply.
6195 * An Ethernet flow mask to apply.
6198 * 0 on success, a negative errno value otherwise and rte_errno is set.
6201 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6202 struct rte_flow_item_eth *eth_spec,
6203 struct rte_flow_item_eth *eth_mask)
6205 return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6209 * Create a default miss flow rule matching LACP traffic.
6212 * Pointer to Ethernet device.
6214 * An Ethernet flow spec to apply.
6217 * 0 on success, a negative errno value otherwise and rte_errno is set.
6220 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6222 struct mlx5_priv *priv = dev->data->dev_private;
6224 * The LACP matching is done by using only the ether type, since using
6225 * a multicast dst MAC causes the kernel to give low priority to this flow.
6227 static const struct rte_flow_item_eth lacp_spec = {
6228 .type = RTE_BE16(0x8809),
6230 static const struct rte_flow_item_eth lacp_mask = {
6233 const struct rte_flow_attr attr = {
6236 struct rte_flow_item items[] = {
6238 .type = RTE_FLOW_ITEM_TYPE_ETH,
6243 .type = RTE_FLOW_ITEM_TYPE_END,
6246 struct rte_flow_action actions[] = {
6248 .type = (enum rte_flow_action_type)
6249 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6252 .type = RTE_FLOW_ACTION_TYPE_END,
6255 struct rte_flow_error error;
6256 uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6257 &attr, items, actions, false, &error);
6267 * @see rte_flow_destroy()
6271 mlx5_flow_destroy(struct rte_eth_dev *dev,
6272 struct rte_flow *flow,
6273 struct rte_flow_error *error __rte_unused)
6275 struct mlx5_priv *priv = dev->data->dev_private;
6277 flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6282 * Destroy all flows.
6284 * @see rte_flow_flush()
6288 mlx5_flow_flush(struct rte_eth_dev *dev,
6289 struct rte_flow_error *error __rte_unused)
6291 struct mlx5_priv *priv = dev->data->dev_private;
6293 mlx5_flow_list_flush(dev, &priv->flows, false);
6300 * @see rte_flow_isolate()
6304 mlx5_flow_isolate(struct rte_eth_dev *dev,
6306 struct rte_flow_error *error)
6308 struct mlx5_priv *priv = dev->data->dev_private;
6310 if (dev->data->dev_started) {
6311 rte_flow_error_set(error, EBUSY,
6312 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6314 "port must be stopped first");
6317 priv->isolated = !!enable;
6319 dev->dev_ops = &mlx5_os_dev_ops_isolate;
6321 dev->dev_ops = &mlx5_os_dev_ops;
6323 dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6324 dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6332 * @see rte_flow_query()
6336 flow_drv_query(struct rte_eth_dev *dev,
6338 const struct rte_flow_action *actions,
6340 struct rte_flow_error *error)
6342 struct mlx5_priv *priv = dev->data->dev_private;
6343 const struct mlx5_flow_driver_ops *fops;
6344 struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6345 [MLX5_IPOOL_RTE_FLOW],
6347 enum mlx5_flow_drv_type ftype;
6350 return rte_flow_error_set(error, ENOENT,
6351 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6353 "invalid flow handle");
6355 ftype = flow->drv_type;
6356 MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6357 fops = flow_get_drv_ops(ftype);
6359 return fops->query(dev, flow, actions, data, error);
6365 * @see rte_flow_query()
6369 mlx5_flow_query(struct rte_eth_dev *dev,
6370 struct rte_flow *flow,
6371 const struct rte_flow_action *actions,
6373 struct rte_flow_error *error)
6377 ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6385 * Convert a flow director filter to a generic flow.
6388 * Pointer to Ethernet device.
6389 * @param fdir_filter
6390 * Flow director filter to add.
6392 * Generic flow parameters structure.
6395 * 0 on success, a negative errno value otherwise and rte_errno is set.
6398 flow_fdir_filter_convert(struct rte_eth_dev *dev,
6399 const struct rte_eth_fdir_filter *fdir_filter,
6400 struct mlx5_fdir *attributes)
6402 struct mlx5_priv *priv = dev->data->dev_private;
6403 const struct rte_eth_fdir_input *input = &fdir_filter->input;
6404 const struct rte_eth_fdir_masks *mask =
6405 &dev->data->dev_conf.fdir_conf.mask;
6407 /* Validate queue number. */
6408 if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
6409 DRV_LOG(ERR, "port %u invalid queue number %d",
6410 dev->data->port_id, fdir_filter->action.rx_queue);
6414 attributes->attr.ingress = 1;
6415 attributes->items[0] = (struct rte_flow_item) {
6416 .type = RTE_FLOW_ITEM_TYPE_ETH,
6417 .spec = &attributes->l2,
6418 .mask = &attributes->l2_mask,
6420 switch (fdir_filter->action.behavior) {
6421 case RTE_ETH_FDIR_ACCEPT:
6422 attributes->actions[0] = (struct rte_flow_action){
6423 .type = RTE_FLOW_ACTION_TYPE_QUEUE,
6424 .conf = &attributes->queue,
6427 case RTE_ETH_FDIR_REJECT:
6428 attributes->actions[0] = (struct rte_flow_action){
6429 .type = RTE_FLOW_ACTION_TYPE_DROP,
6433 DRV_LOG(ERR, "port %u invalid behavior %d",
6435 fdir_filter->action.behavior);
6436 rte_errno = ENOTSUP;
6439 attributes->queue.index = fdir_filter->action.rx_queue;
6441 switch (fdir_filter->input.flow_type) {
6442 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6443 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6444 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6445 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
6446 .src_addr = input->flow.ip4_flow.src_ip,
6447 .dst_addr = input->flow.ip4_flow.dst_ip,
6448 .time_to_live = input->flow.ip4_flow.ttl,
6449 .type_of_service = input->flow.ip4_flow.tos,
6451 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
6452 .src_addr = mask->ipv4_mask.src_ip,
6453 .dst_addr = mask->ipv4_mask.dst_ip,
6454 .time_to_live = mask->ipv4_mask.ttl,
6455 .type_of_service = mask->ipv4_mask.tos,
6456 .next_proto_id = mask->ipv4_mask.proto,
6458 attributes->items[1] = (struct rte_flow_item){
6459 .type = RTE_FLOW_ITEM_TYPE_IPV4,
6460 .spec = &attributes->l3,
6461 .mask = &attributes->l3_mask,
6464 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6465 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6466 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6467 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
6468 .hop_limits = input->flow.ipv6_flow.hop_limits,
6469 .proto = input->flow.ipv6_flow.proto,
6472 memcpy(attributes->l3.ipv6.hdr.src_addr,
6473 input->flow.ipv6_flow.src_ip,
6474 RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
6475 memcpy(attributes->l3.ipv6.hdr.dst_addr,
6476 input->flow.ipv6_flow.dst_ip,
6477 RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
6478 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
6479 mask->ipv6_mask.src_ip,
6480 RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
6481 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
6482 mask->ipv6_mask.dst_ip,
6483 RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
6484 attributes->items[1] = (struct rte_flow_item){
6485 .type = RTE_FLOW_ITEM_TYPE_IPV6,
6486 .spec = &attributes->l3,
6487 .mask = &attributes->l3_mask,
6491 DRV_LOG(ERR, "port %u invalid flow type %d",
6492 dev->data->port_id, fdir_filter->input.flow_type);
6493 rte_errno = ENOTSUP;
6497 switch (fdir_filter->input.flow_type) {
6498 case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6499 attributes->l4.udp.hdr = (struct rte_udp_hdr){
6500 .src_port = input->flow.udp4_flow.src_port,
6501 .dst_port = input->flow.udp4_flow.dst_port,
6503 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6504 .src_port = mask->src_port_mask,
6505 .dst_port = mask->dst_port_mask,
6507 attributes->items[2] = (struct rte_flow_item){
6508 .type = RTE_FLOW_ITEM_TYPE_UDP,
6509 .spec = &attributes->l4,
6510 .mask = &attributes->l4_mask,
6513 case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6514 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6515 .src_port = input->flow.tcp4_flow.src_port,
6516 .dst_port = input->flow.tcp4_flow.dst_port,
6518 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6519 .src_port = mask->src_port_mask,
6520 .dst_port = mask->dst_port_mask,
6522 attributes->items[2] = (struct rte_flow_item){
6523 .type = RTE_FLOW_ITEM_TYPE_TCP,
6524 .spec = &attributes->l4,
6525 .mask = &attributes->l4_mask,
6528 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6529 attributes->l4.udp.hdr = (struct rte_udp_hdr){
6530 .src_port = input->flow.udp6_flow.src_port,
6531 .dst_port = input->flow.udp6_flow.dst_port,
6533 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6534 .src_port = mask->src_port_mask,
6535 .dst_port = mask->dst_port_mask,
6537 attributes->items[2] = (struct rte_flow_item){
6538 .type = RTE_FLOW_ITEM_TYPE_UDP,
6539 .spec = &attributes->l4,
6540 .mask = &attributes->l4_mask,
6543 case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6544 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6545 .src_port = input->flow.tcp6_flow.src_port,
6546 .dst_port = input->flow.tcp6_flow.dst_port,
6548 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6549 .src_port = mask->src_port_mask,
6550 .dst_port = mask->dst_port_mask,
6552 attributes->items[2] = (struct rte_flow_item){
6553 .type = RTE_FLOW_ITEM_TYPE_TCP,
6554 .spec = &attributes->l4,
6555 .mask = &attributes->l4_mask,
6558 case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6559 case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6562 DRV_LOG(ERR, "port %u invalid flow type %d",
6563 dev->data->port_id, fdir_filter->input.flow_type);
6564 rte_errno = ENOTSUP;
6570 #define FLOW_FDIR_CMP(f1, f2, fld) \
6571 memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
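/*
 * Illustrative note: FLOW_FDIR_CMP() is a per-field memcmp(); e.g.
 * FLOW_FDIR_CMP(f1, f2, l3) expands to
 * memcmp(&f1->l3, &f2->l3, sizeof(f1->l3)). The comparison is bitwise,
 * so it relies on both compared flows being zero-initialized before the
 * conversion fills them in.
 */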
6574 * Compare two FDIR flows. If items and actions are identical, the two flows are regarded as the same.
6580 * FDIR flow to compare.
6582 * FDIR flow to compare.
6585 * Zero on match, 1 otherwise.
6588 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
6590 if (FLOW_FDIR_CMP(f1, f2, attr) ||
6591 FLOW_FDIR_CMP(f1, f2, l2) ||
6592 FLOW_FDIR_CMP(f1, f2, l2_mask) ||
6593 FLOW_FDIR_CMP(f1, f2, l3) ||
6594 FLOW_FDIR_CMP(f1, f2, l3_mask) ||
6595 FLOW_FDIR_CMP(f1, f2, l4) ||
6596 FLOW_FDIR_CMP(f1, f2, l4_mask) ||
6597 FLOW_FDIR_CMP(f1, f2, actions[0].type))
6599 if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
6600 FLOW_FDIR_CMP(f1, f2, queue))
6606 * Search the device flow list to find a matching FDIR flow.
6609 * Pointer to Ethernet device.
6611 * FDIR flow to lookup.
6614 * Index of flow if found, 0 otherwise.
6617 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
6619 struct mlx5_priv *priv = dev->data->dev_private;
6620 uint32_t flow_idx = 0;
6621 struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6623 MLX5_ASSERT(fdir_flow);
6624 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6625 if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
6626 flow_idx = priv_fdir_flow->rix_flow;
6627 DRV_LOG(DEBUG, "port %u found FDIR flow %u",
6628 dev->data->port_id, flow_idx);
6636 * Add new flow director filter and store it in list.
6639 * Pointer to Ethernet device.
6640 * @param fdir_filter
6641 * Flow director filter to add.
6644 * 0 on success, a negative errno value otherwise and rte_errno is set.
6647 flow_fdir_filter_add(struct rte_eth_dev *dev,
6648 const struct rte_eth_fdir_filter *fdir_filter)
6650 struct mlx5_priv *priv = dev->data->dev_private;
6651 struct mlx5_fdir *fdir_flow;
6652 struct rte_flow *flow;
6653 struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6657 fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
6663 ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
6666 flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
6671 priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
6672 sizeof(struct mlx5_fdir_flow),
6674 if (!priv_fdir_flow) {
6678 flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
6679 fdir_flow->items, fdir_flow->actions, true,
6681 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
6685 priv_fdir_flow->fdir = fdir_flow;
6686 priv_fdir_flow->rix_flow = flow_idx;
6687 LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
6688 DRV_LOG(DEBUG, "port %u created FDIR flow %p",
6689 dev->data->port_id, (void *)flow);
6692 mlx5_free(priv_fdir_flow);
6693 mlx5_free(fdir_flow);
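/*
 * Illustrative sketch (hypothetical application code): a legacy application
 * reaches flow_fdir_filter_add() through the deprecated filter-ctrl API;
 * new code should use rte_flow directly. All values below are examples.
 */
static int
example_fdir_add_udp4(uint16_t port_id)
{
	struct rte_eth_fdir_filter f = {
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				.ip = {
					.src_ip = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
					.dst_ip = RTE_BE32(RTE_IPV4(192, 168, 0, 2)),
				},
				.src_port = RTE_BE16(1024),
				.dst_port = RTE_BE16(4789),
			},
		},
		.action = {
			.behavior = RTE_ETH_FDIR_ACCEPT,
			.rx_queue = 1,
		},
	};

	return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
				       RTE_ETH_FILTER_ADD, &f);
}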
6698 * Delete a specific filter.
6701 * Pointer to Ethernet device.
6702 * @param fdir_filter
6703 * Filter to be deleted.
6706 * 0 on success, a negative errno value otherwise and rte_errno is set.
6709 flow_fdir_filter_delete(struct rte_eth_dev *dev,
6710 const struct rte_eth_fdir_filter *fdir_filter)
6712 struct mlx5_priv *priv = dev->data->dev_private;
6714 struct mlx5_fdir fdir_flow = {
6717 struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6720 ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
6723 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6724 /* Find the FDIR flow in the private list. */
6725 if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
6728 if (!priv_fdir_flow)
6730 LIST_REMOVE(priv_fdir_flow, next);
6731 flow_idx = priv_fdir_flow->rix_flow;
6732 flow_list_destroy(dev, &priv->flows, flow_idx);
6733 mlx5_free(priv_fdir_flow->fdir);
6734 mlx5_free(priv_fdir_flow);
6735 DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
6736 dev->data->port_id, flow_idx);
6741 * Update the queue for a specific filter.
6744 * Pointer to Ethernet device.
6745 * @param fdir_filter
6746 * Filter to be updated.
6749 * 0 on success, a negative errno value otherwise and rte_errno is set.
6752 flow_fdir_filter_update(struct rte_eth_dev *dev,
6753 const struct rte_eth_fdir_filter *fdir_filter)
6757 ret = flow_fdir_filter_delete(dev, fdir_filter);
6760 return flow_fdir_filter_add(dev, fdir_filter);
6764 * Flush all filters.
6767 * Pointer to Ethernet device.
6770 flow_fdir_filter_flush(struct rte_eth_dev *dev)
6772 struct mlx5_priv *priv = dev->data->dev_private;
6773 struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6775 while (!LIST_EMPTY(&priv->fdir_flows)) {
6776 priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
6777 LIST_REMOVE(priv_fdir_flow, next);
6778 flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
6779 mlx5_free(priv_fdir_flow->fdir);
6780 mlx5_free(priv_fdir_flow);
6785 * Get flow director information.
6788 * Pointer to Ethernet device.
6789 * @param[out] fdir_info
6790 * Resulting flow director information.
6793 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
6795 struct rte_eth_fdir_masks *mask =
6796 &dev->data->dev_conf.fdir_conf.mask;
6798 fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
6799 fdir_info->guarant_spc = 0;
6800 rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
6801 fdir_info->max_flexpayload = 0;
6802 fdir_info->flow_types_mask[0] = 0;
6803 fdir_info->flex_payload_unit = 0;
6804 fdir_info->max_flex_payload_segment_num = 0;
6805 fdir_info->flex_payload_limit = 0;
6806 memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
6810 * Deal with flow director operations.
6813 * Pointer to Ethernet device.
6815 * Operation to perform.
6817 * Pointer to operation-specific structure.
6820 * 0 on success, a negative errno value otherwise and rte_errno is set.
6823 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
6826 enum rte_fdir_mode fdir_mode =
6827 dev->data->dev_conf.fdir_conf.mode;
6829 if (filter_op == RTE_ETH_FILTER_NOP)
6831 if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
6832 fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
6833 DRV_LOG(ERR, "port %u flow director mode %d not supported",
6834 dev->data->port_id, fdir_mode);
6838 switch (filter_op) {
6839 case RTE_ETH_FILTER_ADD:
6840 return flow_fdir_filter_add(dev, arg);
6841 case RTE_ETH_FILTER_UPDATE:
6842 return flow_fdir_filter_update(dev, arg);
6843 case RTE_ETH_FILTER_DELETE:
6844 return flow_fdir_filter_delete(dev, arg);
6845 case RTE_ETH_FILTER_FLUSH:
6846 flow_fdir_filter_flush(dev);
6848 case RTE_ETH_FILTER_INFO:
6849 flow_fdir_info_get(dev, arg);
6852 DRV_LOG(DEBUG, "port %u unknown operation %u",
6853 dev->data->port_id, filter_op);
6861 * Manage filter operations.
6864 * Pointer to Ethernet device structure.
6865 * @param filter_type
6868 * Operation to perform.
6870 * Pointer to operation-specific structure.
6873 * 0 on success, a negative errno value otherwise and rte_errno is set.
6876 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
6877 enum rte_filter_type filter_type,
6878 enum rte_filter_op filter_op,
6881 switch (filter_type) {
6882 case RTE_ETH_FILTER_GENERIC:
6883 if (filter_op != RTE_ETH_FILTER_GET) {
6887 *(const void **)arg = &mlx5_flow_ops;
6889 case RTE_ETH_FILTER_FDIR:
6890 return flow_fdir_ctrl_func(dev, filter_op, arg);
6892 DRV_LOG(ERR, "port %u filter type (%d) not supported",
6893 dev->data->port_id, filter_type);
6894 rte_errno = ENOTSUP;
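/*
 * Illustrative sketch: ethdev resolves the rte_flow ops through the
 * RTE_ETH_FILTER_GENERIC/RTE_ETH_FILTER_GET pair handled above.
 * Hypothetical, conceptually equivalent code:
 */
static const struct rte_flow_ops *
example_get_flow_ops(struct rte_eth_dev *dev)
{
	const struct rte_flow_ops *ops = NULL;

	if (mlx5_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
				 RTE_ETH_FILTER_GET, &ops) < 0)
		return NULL;
	return ops;
}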
6901 * Create the needed meter and suffix tables.
6904 * Pointer to Ethernet device.
6906 * Pointer to the flow meter.
6909 * Pointer to table set on success, NULL otherwise.
6911 struct mlx5_meter_domains_infos *
6912 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
6913 const struct mlx5_flow_meter *fm)
6915 const struct mlx5_flow_driver_ops *fops;
6917 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6918 return fops->create_mtr_tbls(dev, fm);
6922 * Destroy the meter table set.
6925 * Pointer to Ethernet device.
6927 * Pointer to the meter table set.
6933 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
6934 struct mlx5_meter_domains_infos *tbls)
6936 const struct mlx5_flow_driver_ops *fops;
6938 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6939 return fops->destroy_mtr_tbls(dev, tbls);
6943 * Create policer rules.
6946 * Pointer to Ethernet device.
6948 * Pointer to flow meter structure.
6950 * Pointer to flow attributes.
6953 * 0 on success, -1 otherwise.
6956 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
6957 struct mlx5_flow_meter *fm,
6958 const struct rte_flow_attr *attr)
6960 const struct mlx5_flow_driver_ops *fops;
6962 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6963 return fops->create_policer_rules(dev, fm, attr);
6967 * Destroy policer rules.
6970 * Pointer to flow meter structure.
6972 * Pointer to flow attributes.
6975 * 0 on success, -1 otherwise.
6978 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
6979 struct mlx5_flow_meter *fm,
6980 const struct rte_flow_attr *attr)
6982 const struct mlx5_flow_driver_ops *fops;
6984 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6985 return fops->destroy_policer_rules(dev, fm, attr);
6989 * Allocate a counter.
6992 * Pointer to Ethernet device structure.
6995 * Index to allocated counter on success, 0 otherwise.
6998 mlx5_counter_alloc(struct rte_eth_dev *dev)
7000 const struct mlx5_flow_driver_ops *fops;
7001 struct rte_flow_attr attr = { .transfer = 0 };
7003 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7004 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7005 return fops->counter_alloc(dev);
7008 "port %u counter allocate is not supported.",
7009 dev->data->port_id);
7017 * Pointer to Ethernet device structure.
7019 * Index of the counter to be freed.
7022 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
7024 const struct mlx5_flow_driver_ops *fops;
7025 struct rte_flow_attr attr = { .transfer = 0 };
7027 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7028 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7029 fops->counter_free(dev, cnt);
7033 "port %u counter free is not supported.",
7034 dev->data->port_id);
7038 * Query counter statistics.
7041 * Pointer to Ethernet device structure.
7043 * Index of the counter to query.
7045 * Set to clear counter statistics.
7047 * Where to save the number of packets that hit the counter.
7049 * Where to save the number of bytes that hit the counter.
7052 * 0 on success, a negative errno value otherwise.
7055 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
7056 bool clear, uint64_t *pkts, uint64_t *bytes)
7058 const struct mlx5_flow_driver_ops *fops;
7059 struct rte_flow_attr attr = { .transfer = 0 };
7061 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7062 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7063 return fops->counter_query(dev, cnt, clear, pkts, bytes);
7066 "port %u counter query is not supported.",
7067 dev->data->port_id);
7072 * Allocate new memory for the counter values, wrapped by all the needed management structures.
7076 * Pointer to mlx5_dev_ctx_shared object.
7079 * 0 on success, a negative errno value otherwise.
7082 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
7084 struct mlx5_devx_mkey_attr mkey_attr;
7085 struct mlx5_counter_stats_mem_mng *mem_mng;
7086 volatile struct flow_counter_stats *raw_data;
7087 int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
7088 int size = (sizeof(struct flow_counter_stats) *
7089 MLX5_COUNTERS_PER_POOL +
7090 sizeof(struct mlx5_counter_stats_raw)) * raws_n +
7091 sizeof(struct mlx5_counter_stats_mem_mng);
7092 size_t pgsize = rte_mem_page_size();
7096 if (pgsize == (size_t)-1) {
7097 DRV_LOG(ERR, "Failed to get mem page size");
7101 mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
7106 mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
7107 size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
7108 mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
7109 IBV_ACCESS_LOCAL_WRITE);
7110 if (!mem_mng->umem) {
7115 mkey_attr.addr = (uintptr_t)mem;
7116 mkey_attr.size = size;
7117 mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
7118 mkey_attr.pd = sh->pdn;
7119 mkey_attr.log_entity_size = 0;
7120 mkey_attr.pg_access = 0;
7121 mkey_attr.klm_array = NULL;
7122 mkey_attr.klm_num = 0;
7123 mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
7124 mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
7126 mlx5_glue->devx_umem_dereg(mem_mng->umem);
7131 mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
7132 raw_data = (volatile struct flow_counter_stats *)mem;
7133 for (i = 0; i < raws_n; ++i) {
7134 mem_mng->raws[i].mem_mng = mem_mng;
7135 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
7137 for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
7138 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
7139 mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
7141 LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
7142 sh->cmng.mem_mng = mem_mng;
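/*
 * Illustrative layout of the single allocation above (derived from the
 * pointer arithmetic in this function):
 *
 *   mem ................ raw counter data: raws_n slices, each holding
 *                        MLX5_COUNTERS_PER_POOL flow_counter_stats entries
 *                        (this is also the region registered as umem/mkey)
 *   mem + data size .... raws_n struct mlx5_counter_stats_raw descriptors,
 *                        raws[i].data pointing into slice i of the raw data
 *   end of block ....... one struct mlx5_counter_stats_mem_mng
 */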
7147 * Set the statistic memory to the new counter pool.
7150 * Pointer to mlx5_dev_ctx_shared object.
7152 * Pointer to the pool to set the statistic memory.
7155 * 0 on success, a negative errno value otherwise.
7158 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
7159 struct mlx5_flow_counter_pool *pool)
7161 struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7162 /* Resize the statistic memory once it is used up. */
7163 if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
7164 mlx5_flow_create_counter_stat_mem_mng(sh)) {
7165 DRV_LOG(ERR, "Cannot resize counter stat mem.");
7168 rte_spinlock_lock(&pool->sl);
7169 pool->raw = cmng->mem_mng->raws + pool->index %
7170 MLX5_CNT_CONTAINER_RESIZE;
7171 rte_spinlock_unlock(&pool->sl);
7172 pool->raw_hw = NULL;
7176 #define MLX5_POOL_QUERY_FREQ_US 1000000
7179 * Set the periodic procedure for triggering asynchronous batch queries for all
7180 * the counter pools.
7183 * Pointer to mlx5_dev_ctx_shared object.
7186 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
7188 uint32_t pools_n, us;
7190 pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
7191 us = MLX5_POOL_QUERY_FREQ_US / pools_n;
7192 DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
7193 if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
7194 sh->cmng.query_thread_on = 0;
7195 DRV_LOG(ERR, "Cannot reinitialize query alarm");
7197 sh->cmng.query_thread_on = 1;
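/*
 * Illustrative sketch (hypothetical standalone code): the query alarm
 * re-arms itself from its own callback, the usual rte_eal_alarm_set()
 * pattern from <rte_alarm.h> used by mlx5_flow_query_alarm() below.
 */
static void
example_periodic_cb(void *arg)
{
	uint32_t *ticks = arg;

	(*ticks)++;
	/* Schedule the next invocation one period from now. */
	rte_eal_alarm_set(MLX5_POOL_QUERY_FREQ_US, example_periodic_cb, arg);
}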
7202 * The periodic procedure for triggering asynchronous batch queries for all the
7203 * counter pools. This function is probably called by the host thread.
7206 * The parameter for the alarm process.
7209 mlx5_flow_query_alarm(void *arg)
7211 struct mlx5_dev_ctx_shared *sh = arg;
7213 uint16_t pool_index = sh->cmng.pool_index;
7214 struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7215 struct mlx5_flow_counter_pool *pool;
7218 if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
7220 rte_spinlock_lock(&cmng->pool_update_sl);
7221 pool = cmng->pools[pool_index];
7222 n_valid = cmng->n_valid;
7223 rte_spinlock_unlock(&cmng->pool_update_sl);
7224 /* Set the statistic memory to the newly created pool. */
7225 if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
7228 /* There is a pool query in progress. */
7231 LIST_FIRST(&sh->cmng.free_stat_raws);
7233 /* No free counter statistics raw memory. */
7236 * Identify, more efficiently, the counters released between the query
7237 * trigger and the query handler. A counter released in this gap period
7238 * should wait for a new query round, as the newly arrived packets
7239 * will not yet be taken into account.
7242 ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
7243 MLX5_COUNTERS_PER_POOL,
7245 pool->raw_hw->mem_mng->dm->id,
7249 (uint64_t)(uintptr_t)pool);
7251 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
7252 " %d", pool->min_dcs->id);
7253 pool->raw_hw = NULL;
7256 LIST_REMOVE(pool->raw_hw, next);
7257 sh->cmng.pending_queries++;
7259 if (pool_index >= n_valid)
7262 sh->cmng.pool_index = pool_index;
7263 mlx5_set_query_alarm(sh);
7267 * Check for newly aged flows in the counter pool and raise the aging event.
7270 * Pointer to mlx5_dev_ctx_shared object.
7272 * Pointer to the current counter pool.
7275 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
7276 struct mlx5_flow_counter_pool *pool)
7278 struct mlx5_priv *priv;
7279 struct mlx5_flow_counter *cnt;
7280 struct mlx5_age_info *age_info;
7281 struct mlx5_age_param *age_param;
7282 struct mlx5_counter_stats_raw *cur = pool->raw_hw;
7283 struct mlx5_counter_stats_raw *prev = pool->raw;
7284 const uint64_t curr_time = MLX5_CURR_TIME_SEC;
7285 const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
7286 uint16_t expected = AGE_CANDIDATE;
7289 pool->time_of_last_age_check = curr_time;
7290 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
7291 cnt = MLX5_POOL_GET_CNT(pool, i);
7292 age_param = MLX5_CNT_TO_AGE(cnt);
7293 if (__atomic_load_n(&age_param->state,
7294 __ATOMIC_RELAXED) != AGE_CANDIDATE)
7296 if (cur->data[i].hits != prev->data[i].hits) {
7297 __atomic_store_n(&age_param->sec_since_last_hit, 0,
7301 if (__atomic_add_fetch(&age_param->sec_since_last_hit,
7303 __ATOMIC_RELAXED) <= age_param->timeout)
7306 * Hold the lock first; otherwise, if the release
7307 * happens between setting the state to AGE_TMOUT
7308 * and the tailq operation, the release procedure
7309 * may delete a non-existent tailq node.
7311 priv = rte_eth_devices[age_param->port_id].data->dev_private;
7312 age_info = GET_PORT_AGE_INFO(priv);
7313 rte_spinlock_lock(&age_info->aged_sl);
7314 if (__atomic_compare_exchange_n(&age_param->state, &expected,
7317 __ATOMIC_RELAXED)) {
7318 TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
7319 MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
7321 rte_spinlock_unlock(&age_info->aged_sl);
7323 for (i = 0; i < sh->max_port; i++) {
7324 age_info = &sh->port[i].age_info;
7325 if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
7327 if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
7328 rte_eth_dev_callback_process
7329 (&rte_eth_devices[sh->port[i].devx_ih_port_id],
7330 RTE_ETH_EVENT_FLOW_AGED, NULL);
7331 age_info->flags = 0;
7336 * Handler for the HW response carrying the ready values of an asynchronous
7337 * batch query. This function is probably called by the host thread.
7340 * The pointer to the shared device context.
7341 * @param[in] async_id
7342 * The Devx async ID.
7344 * The status of the completion.
7347 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
7348 uint64_t async_id, int status)
7350 struct mlx5_flow_counter_pool *pool =
7351 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
7352 struct mlx5_counter_stats_raw *raw_to_free;
7353 uint8_t query_gen = pool->query_gen ^ 1;
7354 struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7355 enum mlx5_counter_type cnt_type =
7356 pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
7357 MLX5_COUNTER_TYPE_ORIGIN;
7359 if (unlikely(status)) {
7360 raw_to_free = pool->raw_hw;
7362 raw_to_free = pool->raw;
7364 mlx5_flow_aging_check(sh, pool);
7365 rte_spinlock_lock(&pool->sl);
7366 pool->raw = pool->raw_hw;
7367 rte_spinlock_unlock(&pool->sl);
7368 /* Be sure the new raw counters data is updated in memory. */
7370 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
7371 rte_spinlock_lock(&cmng->csl[cnt_type]);
7372 TAILQ_CONCAT(&cmng->counters[cnt_type],
7373 &pool->counters[query_gen], next);
7374 rte_spinlock_unlock(&cmng->csl[cnt_type]);
7377 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
7378 pool->raw_hw = NULL;
7379 sh->cmng.pending_queries--;
7382 static const struct mlx5_flow_tbl_data_entry *
7383 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
7385 struct mlx5_priv *priv = dev->data->dev_private;
7386 struct mlx5_dev_ctx_shared *sh = priv->sh;
7387 struct mlx5_hlist_entry *he;
7388 union tunnel_offload_mark mbits = { .val = mark };
7389 union mlx5_flow_tbl_key table_key = {
7391 .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
7393 .domain = !!mbits.transfer,
7397 he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
7399 container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
7403 mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
7404 struct mlx5_hlist_entry *entry)
7406 struct mlx5_dev_ctx_shared *sh = list->ctx;
7407 struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
7409 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7410 tunnel_flow_tbl_to_id(tte->flow_table));
7414 static struct mlx5_hlist_entry *
7415 mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list,
7416 uint64_t key __rte_unused,
7417 void *ctx __rte_unused)
7419 struct mlx5_dev_ctx_shared *sh = list->ctx;
7420 struct tunnel_tbl_entry *tte;
7422 tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
7427 mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7429 if (tte->flow_table >= MLX5_MAX_TABLES) {
7430 DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the max limit.",
7432 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7435 } else if (!tte->flow_table) {
7438 tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
7447 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
7448 const struct mlx5_flow_tunnel *tunnel,
7449 uint32_t group, uint32_t *table,
7450 struct rte_flow_error *error)
7452 struct mlx5_hlist_entry *he;
7453 struct tunnel_tbl_entry *tte;
7454 union tunnel_tbl_key key = {
7455 .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
7458 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7459 struct mlx5_hlist *group_hash;
7461 group_hash = tunnel ? tunnel->groups : thub->groups;
7462 he = mlx5_hlist_register(group_hash, key.val, NULL);
7464 return rte_flow_error_set(error, EINVAL,
7465 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7467 "tunnel group index not supported");
7468 tte = container_of(he, typeof(*tte), hash);
7469 *table = tte->flow_table;
7470 DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
7471 dev->data->port_id, key.tunnel_id, group, *table);
7476 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
7477 struct flow_grp_info grp_info, struct rte_flow_error *error)
7479 if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
7480 if (group == UINT32_MAX)
7481 return rte_flow_error_set
7483 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7485 "group index not supported");
7490 DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
7495 * Translate the rte_flow group index to HW table value.
7497 * If tunnel offload is disabled, all group ids are converted to flow table
7498 * ids using the standard method.
7499 * If tunnel offload is enabled, a group id can be converted using either the
7500 * standard or the tunnel conversion method. The conversion method
7501 * selection depends on the flags in the `grp_info` parameter:
7502 * - Internal (grp_info.external == 0) groups are converted with the standard method.
7504 * - Group ids in the JUMP action are converted with the tunnel method.
7505 * - Group id conversion in a rule attribute depends on the rule type and group id value:
7507 * ** non-zero group attributes are converted with the tunnel method
7508 * ** a zero group attribute in a non-tunnel rule is converted using the
7509 * standard method - there's only one root table
7510 * ** a zero group attribute in a steer tunnel rule is converted with the
7511 * standard method - single root table
7512 * ** a zero group attribute in a match tunnel rule is a special OvS
7513 * case: that value is used for portability reasons. That group
7514 * id is converted with the tunnel conversion method.
7519 * PMD tunnel offload object
7521 * rte_flow group index value.
7524 * @param[in] grp_info
7525 * flags used for conversion
7527 * Pointer to error structure.
7530 * 0 on success, a negative errno value otherwise and rte_errno is set.
7533 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
7534 const struct mlx5_flow_tunnel *tunnel,
7535 uint32_t group, uint32_t *table,
7536 struct flow_grp_info grp_info,
7537 struct rte_flow_error *error)
7540 bool standard_translation;
7542 if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
7543 group *= MLX5_FLOW_TABLE_FACTOR;
7544 if (is_tunnel_offload_active(dev)) {
7545 standard_translation = !grp_info.external ||
7546 grp_info.std_tbl_fix;
7548 standard_translation = true;
7551 "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
7552 dev->data->port_id, group, grp_info.transfer,
7553 grp_info.external, grp_info.fdb_def_rule,
7554 standard_translation ? "STANDARD" : "TUNNEL");
7555 if (standard_translation)
7556 ret = flow_group_to_table(dev->data->port_id, group, table,
7559 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
7566 * Discover availability of metadata reg_c's.
7568 * Iteratively use test flows to check availability.
7571 * Pointer to the Ethernet device structure.
7574 * 0 on success, a negative errno value otherwise and rte_errno is set.
7577 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
7579 struct mlx5_priv *priv = dev->data->dev_private;
7580 struct mlx5_dev_config *config = &priv->config;
7581 enum modify_reg idx;
7584 /* reg_c[0] and reg_c[1] are reserved. */
7585 config->flow_mreg_c[n++] = REG_C_0;
7586 config->flow_mreg_c[n++] = REG_C_1;
7587 /* Discover availability of other reg_c's. */
7588 for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7589 struct rte_flow_attr attr = {
7590 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7591 .priority = MLX5_FLOW_PRIO_RSVD,
7594 struct rte_flow_item items[] = {
7596 .type = RTE_FLOW_ITEM_TYPE_END,
7599 struct rte_flow_action actions[] = {
7601 .type = (enum rte_flow_action_type)
7602 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7603 .conf = &(struct mlx5_flow_action_copy_mreg){
7609 .type = RTE_FLOW_ACTION_TYPE_JUMP,
7610 .conf = &(struct rte_flow_action_jump){
7611 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7615 .type = RTE_FLOW_ACTION_TYPE_END,
7619 struct rte_flow *flow;
7620 struct rte_flow_error error;
7622 if (!config->dv_flow_en)
7624 /* Create internal flow, validation skips copy action. */
7625 flow_idx = flow_list_create(dev, NULL, &attr, items,
7626 actions, false, &error);
7627 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7631 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
7632 config->flow_mreg_c[n++] = idx;
7633 flow_list_destroy(dev, NULL, flow_idx);
7635 for (; n < MLX5_MREG_C_NUM; ++n)
7636 config->flow_mreg_c[n] = REG_NON;
7641 * Dump the raw HW flow data to a file.
7644 * The pointer to Ethernet device.
7646 * A pointer to a file for output.
7648 * Perform verbose error reporting if not NULL. PMDs initialize this
7649 * structure in case of error only.
7651 * 0 on success, a negative value otherwise.
7654 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
7656 struct rte_flow_error *error __rte_unused)
7658 struct mlx5_priv *priv = dev->data->dev_private;
7659 struct mlx5_dev_ctx_shared *sh = priv->sh;
7661 if (!priv->config.dv_flow_en) {
7662 if (fputs("device dv flow disabled\n", file) <= 0)
7666 return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
7667 sh->tx_domain, file);
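/*
 * Illustrative sketch (hypothetical application code, example file path):
 * triggering this dump through the public rte_flow_dev_dump() API.
 */
static int
example_dump_flows(uint16_t port_id)
{
	struct rte_flow_error err;
	FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
	int ret;

	if (f == NULL)
		return -1;
	ret = rte_flow_dev_dump(port_id, f, &err);
	fclose(f);
	return ret;
}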
7671 * Get aged-out flows.
7674 * Pointer to the Ethernet device structure.
7675 * @param[in] context
7676 * The address of an array of pointers to the aged-out flows contexts.
7677 * @param[in] nb_contexts
7678 * The length of context array pointers.
7680 * Perform verbose error reporting if not NULL. Initialized in case of error only.
7684 * the number of contexts retrieved on success, otherwise a negative errno value.
7685 * If nb_contexts is 0, return the total number of aged contexts.
7686 * If nb_contexts is not 0, return the number of aged flows reported
7687 * in the context array.
7690 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
7691 uint32_t nb_contexts, struct rte_flow_error *error)
7693 const struct mlx5_flow_driver_ops *fops;
7694 struct rte_flow_attr attr = { .transfer = 0 };
7696 if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7697 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7698 return fops->get_aged_flows(dev, contexts, nb_contexts,
7702 "port %u get aged flows is not supported.",
7703 dev->data->port_id);
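/*
 * Illustrative sketch (hypothetical application code): aged flows are
 * usually drained from the RTE_ETH_EVENT_FLOW_AGED callback. Each returned
 * context is the rte_flow_action_age.context set at rule creation.
 */
static void
example_drain_aged_flows(uint16_t port_id)
{
	void *contexts[64];
	struct rte_flow_error err;
	int i, n;

	n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts),
				    &err);
	for (i = 0; i < n; i++) {
		/* Destroy or recycle the rule owning contexts[i]. */
		(void)contexts[i];
	}
}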
7707 /* Wrapper for driver action_validate op callback */
7709 flow_drv_action_validate(struct rte_eth_dev *dev,
7710 const struct rte_flow_shared_action_conf *conf,
7711 const struct rte_flow_action *action,
7712 const struct mlx5_flow_driver_ops *fops,
7713 struct rte_flow_error *error)
7715 static const char err_msg[] = "shared action validation unsupported";
7717 if (!fops->action_validate) {
7718 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7719 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7723 return fops->action_validate(dev, conf, action, error);
7727 * Destroys the shared action by handle.
7730 * Pointer to Ethernet device structure.
7732 * Handle for the shared action to be destroyed.
7734 * Perform verbose error reporting if not NULL. PMDs initialize this
7735 * structure in case of error only.
7738 * 0 on success, a negative errno value otherwise and rte_errno is set.
7740 * @note: wrapper for driver action_destroy op callback.
7743 mlx5_shared_action_destroy(struct rte_eth_dev *dev,
7744 struct rte_flow_shared_action *action,
7745 struct rte_flow_error *error)
7747 static const char err_msg[] = "shared action destruction unsupported";
7748 struct rte_flow_attr attr = { .transfer = 0 };
7749 const struct mlx5_flow_driver_ops *fops =
7750 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7752 if (!fops->action_destroy) {
7753 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7754 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7758 return fops->action_destroy(dev, action, error);
7761 /* Wrapper for driver action_update op callback */
7763 flow_drv_action_update(struct rte_eth_dev *dev,
7764 struct rte_flow_shared_action *action,
7765 const void *action_conf,
7766 const struct mlx5_flow_driver_ops *fops,
7767 struct rte_flow_error *error)
7769 static const char err_msg[] = "shared action update unsupported";
7771 if (!fops->action_update) {
7772 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7773 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7777 return fops->action_update(dev, action, action_conf, error);
7781 * Create shared action for reuse in multiple flow rules.
7784 * Pointer to Ethernet device structure.
7786 * Action configuration for shared action creation.
7788 * Perform verbose error reporting if not NULL. PMDs initialize this
7789 * structure in case of error only.
7791 * A valid handle in case of success, NULL otherwise and rte_errno is set.
7793 static struct rte_flow_shared_action *
7794 mlx5_shared_action_create(struct rte_eth_dev *dev,
7795 const struct rte_flow_shared_action_conf *conf,
7796 const struct rte_flow_action *action,
7797 struct rte_flow_error *error)
7799 static const char err_msg[] = "shared action creation unsupported";
7800 struct rte_flow_attr attr = { .transfer = 0 };
7801 const struct mlx5_flow_driver_ops *fops =
7802 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7804 if (flow_drv_action_validate(dev, conf, action, fops, error))
7806 if (!fops->action_create) {
7807 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7808 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7812 return fops->action_create(dev, conf, action, error);
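/*
 * Illustrative sketch (hypothetical application code, example queue list):
 * creating a shared RSS action through the public API; the returned handle
 * can then be referenced by multiple rules via RTE_FLOW_ACTION_TYPE_SHARED.
 */
static struct rte_flow_shared_action *
example_shared_rss(uint16_t port_id, struct rte_flow_error *err)
{
	static const uint16_t queues[] = { 0, 1, 2, 3 };
	const struct rte_flow_action_rss rss = {
		.types = ETH_RSS_IP,
		.queue_num = RTE_DIM(queues),
		.queue = queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	const struct rte_flow_shared_action_conf conf = { .ingress = 1 };

	return rte_flow_shared_action_create(port_id, &conf, &action, err);
}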
7816 * Updates in place the shared action configuration pointed to by the *action*
7817 * handle with the configuration provided as the *action* argument.
7818 * The update of the shared action configuration affects all flow rules reusing
7819 * the action via the handle.
7822 * Pointer to Ethernet device structure.
7823 * @param[in] shared_action
7824 * Handle for the shared action to be updated.
7826 * Action specification used to modify the action pointed to by the handle.
7827 * *action* should be of the same type as the action pointed to by the *action*
7828 * handle argument; otherwise it is considered invalid.
7830 * Perform verbose error reporting if not NULL. PMDs initialize this
7831 * structure in case of error only.
7834 * 0 on success, a negative errno value otherwise and rte_errno is set.
7837 mlx5_shared_action_update(struct rte_eth_dev *dev,
7838 struct rte_flow_shared_action *shared_action,
7839 const struct rte_flow_action *action,
7840 struct rte_flow_error *error)
7842 struct rte_flow_attr attr = { .transfer = 0 };
7843 const struct mlx5_flow_driver_ops *fops =
7844 flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7847 switch (shared_action->type) {
7848 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
7849 if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
7850 return rte_flow_error_set(error, EINVAL,
7851 RTE_FLOW_ERROR_TYPE_ACTION,
7853 "update action type invalid");
7855 ret = flow_drv_action_validate(dev, NULL, action, fops, error);
7858 return flow_drv_action_update(dev, shared_action, action->conf,
7861 return rte_flow_error_set(error, ENOTSUP,
7862 RTE_FLOW_ERROR_TYPE_ACTION,
7864 "action type not supported");
7869 * Query the shared action by handle.
7871 * This function allows retrieving action-specific data such as counters.
7872 * Data is gathered by a special action which may be present/referenced in
7873 * more than one flow rule definition.
7875 * \see RTE_FLOW_ACTION_TYPE_COUNT
7878 * Pointer to Ethernet device structure.
7880 * Handle for the shared action to query.
7881 * @param[in, out] data
7882 * Pointer to storage for the associated query data type.
7884 * Perform verbose error reporting if not NULL. PMDs initialize this
7885 * structure in case of error only.
7888 * 0 on success, a negative errno value otherwise and rte_errno is set.
7891 mlx5_shared_action_query(struct rte_eth_dev *dev,
7892 const struct rte_flow_shared_action *action,
7894 struct rte_flow_error *error)
7897 switch (action->type) {
7898 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
7899 __atomic_load(&action->refcnt, (uint32_t *)data,
7903 return rte_flow_error_set(error, ENOTSUP,
7904 RTE_FLOW_ERROR_TYPE_ACTION,
7906 "action type not supported");
7911 * Destroy all shared actions.
7914 * Pointer to Ethernet device.
7917 * 0 on success, a negative errno value otherwise and rte_errno is set.
7920 mlx5_shared_action_flush(struct rte_eth_dev *dev)
7922 struct rte_flow_error error;
7923 struct mlx5_priv *priv = dev->data->dev_private;
7924 struct rte_flow_shared_action *action;
7927 while (!LIST_EMPTY(&priv->shared_actions)) {
7928 action = LIST_FIRST(&priv->shared_actions);
7929 ret = mlx5_shared_action_destroy(dev, action, &error);
7935 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
7936 struct mlx5_flow_tunnel *tunnel)
7938 struct mlx5_priv *priv = dev->data->dev_private;
7940 DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
7941 dev->data->port_id, tunnel->tunnel_id);
7942 RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
7943 LIST_REMOVE(tunnel, chain);
7944 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
7946 mlx5_hlist_destroy(tunnel->groups);
7950 static struct mlx5_flow_tunnel *
7951 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
7953 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7954 struct mlx5_flow_tunnel *tun;
7956 LIST_FOREACH(tun, &thub->tunnels, chain) {
7957 if (tun->tunnel_id == id)
7964 static struct mlx5_flow_tunnel *
7965 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
7966 const struct rte_flow_tunnel *app_tunnel)
7968 struct mlx5_priv *priv = dev->data->dev_private;
7969 struct mlx5_flow_tunnel *tunnel;
7972 mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
7974 if (id >= MLX5_MAX_TUNNELS) {
7975 mlx5_ipool_free(priv->sh->ipool
7976 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
7977 DRV_LOG(ERR, "Tunnel ID %d exceeds the max limit.", id);
7983 * mlx5 flow tunnel is an auxiliary data structure.
7984 * It's not part of IO. No need to allocate it from
7985 * the huge page pools dedicated to IO.
7987 tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
7990 mlx5_ipool_free(priv->sh->ipool
7991 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
7994 tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
7995 mlx5_flow_tunnel_grp2tbl_create_cb,
7997 mlx5_flow_tunnel_grp2tbl_remove_cb);
7998 if (!tunnel->groups) {
7999 mlx5_ipool_free(priv->sh->ipool
8000 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], id);
8004 tunnel->groups->ctx = priv->sh;
8005 /* Initialize the new PMD tunnel. */
8006 memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
8007 tunnel->tunnel_id = id;
8008 tunnel->action.type = (typeof(tunnel->action.type))
8009 MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
8010 tunnel->action.conf = tunnel;
8011 tunnel->item.type = (typeof(tunnel->item.type))
8012 MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
8013 tunnel->item.spec = tunnel;
8014 tunnel->item.last = NULL;
8015 tunnel->item.mask = NULL;
8017 DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
8018 dev->data->port_id, tunnel->tunnel_id);
8024 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
8025 const struct rte_flow_tunnel *app_tunnel,
8026 struct mlx5_flow_tunnel **tunnel)
8029 struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
8030 struct mlx5_flow_tunnel *tun;
8032 LIST_FOREACH(tun, &thub->tunnels, chain) {
8033 if (!memcmp(app_tunnel, &tun->app_tunnel,
8034 sizeof(*app_tunnel))) {
8041 tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
8043 LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
8050 __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
8055 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
8057 struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8061 if (!LIST_EMPTY(&thub->tunnels))
8062 DRV_LOG(WARNING, "port %u tunnels present\n", port_id);
8063 mlx5_hlist_destroy(thub->groups);
8067 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
8070 struct mlx5_flow_tunnel_hub *thub;
8072 thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
8076 LIST_INIT(&thub->tunnels);
8077 thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
8078 0, mlx5_flow_tunnel_grp2tbl_create_cb,
8080 mlx5_flow_tunnel_grp2tbl_remove_cb);
8081 if (!thub->groups) {
8085 thub->groups->ctx = sh;
8086 sh->tunnel_hub = thub;
8092 mlx5_hlist_destroy(thub->groups);
8098 #ifndef HAVE_MLX5DV_DR
8099 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
8101 #define MLX5_DOMAIN_SYNC_FLOW \
8102 (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
8105 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
8107 struct rte_eth_dev *dev = &rte_eth_devices[port_id];
8108 const struct mlx5_flow_driver_ops *fops;
8110 struct rte_flow_attr attr = { .transfer = 0 };
8112 fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8113 ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
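/*
 * Illustrative sketch (hypothetical application code): request a steering
 * domain sync. Assumption: bit 0 selects the NIC Rx domain and bit 1 the
 * NIC Tx domain, per the domain bit definitions in rte_pmd_mlx5.h.
 */
static int
example_sync_nic_domains(uint16_t port_id)
{
	return rte_pmd_mlx5_sync_flow(port_id, (1 << 0) | (1 << 1));
}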