ethdev: add pre-defined meter policy API
[dpdk.git] / drivers / net / mlx5 / mlx5_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rx.h"
#include "mlx5_tx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

struct tunnel_default_miss_ctx {
        uint16_t *queue;
        __extension__
        union {
                struct rte_flow_action_rss action_rss;
                struct rte_flow_action_queue miss_queue;
                struct rte_flow_action_jump miss_jump;
                uint8_t raw[0];
        };
};

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_action *app_actions,
                             uint32_t flow_idx,
                             struct tunnel_default_miss_ctx *ctx,
                             struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
                                const struct mlx5_flow_tunnel *tunnel,
                                uint32_t group, uint32_t *table,
                                struct rte_flow_error *error);

static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
static void mlx5_flow_pop_thread_workspace(void);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
        (const int []){ \
                __VA_ARGS__, 0, \
        }
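
/*
 * Illustration (not driver code): the macro above builds a zero-terminated
 * compound-literal array, e.g.
 *
 *   MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6)
 *
 * expands to
 *
 *   (const int []){ MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0, }
 *
 * which is why index 0 serves as the list terminator in the graph nodes
 * below.
 */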

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
        const int *const next;
        /**<
         * List of next node indexes. Index 0 is interpreted as a terminator.
         */
        const enum rte_flow_item_type type;
        /**< Pattern item type of current node. */
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
         * (see ETH_RSS_* definitions).
         */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
        uint32_t entries;
        /**< Number of entries in @p entry. */
        struct {
                struct rte_flow_item *pattern; /**< Expanded pattern array. */
                uint32_t priority; /**< Priority offset for each expansion. */
        } entry[];
};

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
        enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
        uint16_t ether_type = 0;
        uint16_t ether_type_m;
        uint8_t ip_next_proto = 0;
        uint8_t ip_next_proto_m;

        if (item == NULL || item->spec == NULL)
                return ret;
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_eth *)
                                                (item->mask))->type;
                else
                        ether_type_m = rte_flow_item_eth_mask.type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_eth *)
                                (item->spec))->type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_vlan *)
                                                (item->mask))->inner_type;
                else
                        ether_type_m = rte_flow_item_vlan_mask.inner_type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_vlan *)
                                (item->spec))->inner_type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
                                        (item->mask))->hdr.next_proto_id;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv4_mask.hdr.next_proto_id;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv4 *)
                                (item->spec))->hdr.next_proto_id;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
                                                (item->mask))->hdr.proto;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv6_mask.hdr.proto;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv6 *)
                                (item->spec))->hdr.proto;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
        }
        return ret;
}
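
/*
 * A minimal sketch of how the helper above completes a pattern
 * (illustrative only, not part of the driver): an ETH item whose spec and
 * mask fully qualify ether_type as IPv4 implies IPV4 as the next item type.
 *
 *   struct rte_flow_item_eth eth_spec = {
 *           .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *   };
 *   struct rte_flow_item_eth eth_mask = {
 *           .type = RTE_BE16(0xFFFF),
 *   };
 *   struct rte_flow_item item = {
 *           .type = RTE_FLOW_ITEM_TYPE_ETH,
 *           .spec = &eth_spec,
 *           .mask = &eth_mask,
 *   };
 *
 * mlx5_flow_expand_rss_item_complete(&item) then returns
 * RTE_FLOW_ITEM_TYPE_IPV4. With a partially masked type it returns VOID,
 * and an unrecognized ether_type yields END (no expansion required).
 */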

#define MLX5_RSS_EXP_ELT_N 8

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success; a negative errno value otherwise and rte_errno is
 *   set. The following errors are defined:
 *
 *   -E2BIG: graph @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                     const struct rte_flow_item *pattern, uint64_t types,
                     const struct mlx5_flow_expand_node graph[],
                     int graph_root_index)
{
        const struct rte_flow_item *item;
        const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
        const int *stack[MLX5_RSS_EXP_ELT_N];
        int stack_pos = 0;
        struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
        const struct mlx5_flow_expand_node *next = NULL;
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
        const struct rte_flow_item *last_item = NULL;

        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
                MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
                buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
                        last_item = item;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
                }
                if (next)
                        node = next;
                user_pattern_size += sizeof(*item);
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
        /* Copy the user pattern in the first entry of the buffer. */
        if (lsize <= size) {
                rte_memcpy(addr, pattern, user_pattern_size);
                addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                buf->entries = 1;
        }
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
        /*
         * Check if the last valid item has a spec set and needs the pattern
         * to be completed, so that the pattern can be used for expansion.
         */
        missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
        }
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
                        next = NULL;
                }
        }
        if (next && missed) {
                elt = 2; /* missed item + item end. */
                node = next;
                lsize += elt * sizeof(*item) + user_pattern_size;
                if ((node->rss_types & types) && lsize <= size) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
                        rte_memcpy(addr, buf->entry[0].pattern,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
        }
        memset(flow_items, 0, sizeof(flow_items));
        next_node = node->next;
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
                        /*
                         * Compute the number of items to copy from the
                         * expansion and copy them.
                         * When stack_pos is 0, there is one element in it,
                         * plus the additional END item.
                         */
                        elt = stack_pos + 2;
                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
                        if (lsize <= size) {
                                size_t n = elt * sizeof(*item);

                                buf->entry[buf->entries].priority =
                                        stack_pos + 1 + missed;
                                buf->entry[buf->entries].pattern = addr;
                                buf->entries++;
                                rte_memcpy(addr, buf->entry[0].pattern,
                                           user_pattern_size);
                                addr = (void *)(((uintptr_t)addr) +
                                                user_pattern_size);
                                rte_memcpy(addr, &missed_item,
                                           missed * sizeof(*item));
                                addr = (void *)(((uintptr_t)addr) +
                                        missed * sizeof(*item));
                                rte_memcpy(addr, flow_items, n);
                                addr = (void *)(((uintptr_t)addr) + n);
                        }
                }
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
                        /* Error out before the stack write would overflow. */
                        if (stack_pos++ == MLX5_RSS_EXP_ELT_N - 1) {
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
                        ++next_node;
                } else {
                        /* Move to the next path. */
                        if (stack_pos)
                                next_node = stack[--stack_pos];
                        next_node++;
                        stack[stack_pos] = next_node;
                }
                node = *next_node ? &graph[*next_node] : NULL;
        }
        return lsize;
}
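
/*
 * Hypothetical usage sketch (assumptions: a large-enough buffer and the
 * mlx5_support_expansion graph defined below): expanding "eth / ipv4 / end"
 * for ETH_RSS_NONFRAG_IPV4_UDP keeps the user pattern as entry[0] and adds
 * a deeper "eth / ipv4 / udp / end" entry with a higher priority offset.
 *
 *   uint8_t raw[4096];
 *   struct mlx5_flow_expand_rss *exp = (struct mlx5_flow_expand_rss *)raw;
 *   struct rte_flow_item pat[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   int ret = mlx5_flow_expand_rss(exp, sizeof(raw), pat,
 *                                  ETH_RSS_NONFRAG_IPV4_UDP,
 *                                  mlx5_support_expansion,
 *                                  MLX5_EXPANSION_ROOT);
 *
 * On success ret is the number of bytes used in the buffer and
 * exp->entry[i].pattern holds each expanded pattern.
 */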

enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
        MLX5_EXPANSION_ROOT_ETH_VLAN,
        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
        MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
        MLX5_EXPANSION_OUTER_IPV4_TCP,
        MLX5_EXPANSION_OUTER_IPV6,
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
        MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
        MLX5_EXPANSION_IPV4_TCP,
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
                                                  MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                                                (MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6,
                                                  MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6,
                         MLX5_EXPANSION_GRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
};
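
/*
 * Reading the graph above: starting from MLX5_EXPANSION_ROOT, one possible
 * path is ETH -> IPV4 -> IPV4_UDP, which yields the expanded pattern
 * "eth / ipv4 / udp" matching RSS type ETH_RSS_NONFRAG_IPV4_UDP. Tunnel
 * nodes such as VXLAN or GRE restart the walk with the inner ETH/IPV4/IPV6
 * branches, so an outer UDP flow can expand down to inner L3/L4 headers.
 */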

static struct rte_flow_action_handle *
mlx5_action_handle_create(struct rte_eth_dev *dev,
                          const struct rte_flow_indir_action_conf *conf,
                          const struct rte_flow_action *action,
                          struct rte_flow_error *error);
static int mlx5_action_handle_destroy
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_action_handle *handle,
                                 struct rte_flow_error *error);
static int mlx5_action_handle_update
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_action_handle *handle,
                                 const void *update,
                                 struct rte_flow_error *error);
static int mlx5_action_handle_query
                                (struct rte_eth_dev *dev,
                                 const struct rte_flow_action_handle *handle,
                                 void *data,
                                 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    struct rte_flow_tunnel *app_tunnel,
                    struct rte_flow_action **actions,
                    uint32_t *num_of_actions,
                    struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       struct rte_flow_tunnel *app_tunnel,
                       struct rte_flow_item **items,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
                              struct rte_flow_item *pmd_items,
                              uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
                                struct rte_flow_action *pmd_actions,
                                uint32_t num_actions,
                                struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err);

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
        .query = mlx5_flow_query,
        .dev_dump = mlx5_flow_dev_dump,
        .get_aged_flows = mlx5_flow_get_aged_flows,
        .action_handle_create = mlx5_action_handle_create,
        .action_handle_destroy = mlx5_action_handle_destroy,
        .action_handle_update = mlx5_action_handle_update,
        .action_handle_query = mlx5_action_handle_query,
        .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
        .tunnel_match = mlx5_flow_tunnel_match,
        .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
        .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
        uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GENEVE,
                .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GRE,
                .ptype = RTE_PTYPE_TUNNEL_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_NVGRE,
                .ptype = RTE_PTYPE_TUNNEL_NVGRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPIP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GTP,
                .ptype = RTE_PTYPE_TUNNEL_GTPU,
        },
};


/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     enum mlx5_feature_name feature,
                     uint32_t id,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;

        switch (feature) {
        case MLX5_HAIRPIN_RX:
                return REG_B;
        case MLX5_HAIRPIN_TX:
                return REG_A;
        case MLX5_METADATA_RX:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_B;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_METADATA_TX:
                return REG_A;
        case MLX5_METADATA_FDB:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_FLOW_MARK:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_1;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_0;
                }
                break;
        case MLX5_MTR_ID:
                /*
                 * If meter color and meter ID share one register, flow match
                 * should use the meter color register for the match.
                 */
                if (priv->mtr_reg_share)
                        return priv->mtr_color_reg;
                else
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
        case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
                 * The metadata COPY_MARK register is used only in the meter
                 * suffix sub-flow when a meter is present, so it is safe to
                 * share the same register.
                 */
                return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
        case MLX5_APP_TAG:
                /*
                 * If the meter is enabled, it engages a register for color
                 * match and flow match. If meter color match does not use
                 * REG_C_2, the REG_C_x used by meter color match must be
                 * skipped.
                 * If the meter is disabled, all available registers may be
                 * used.
                 */
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
                if (id > (uint32_t)(REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
                if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                /*
                 * This case means the meter is using a REG_C_x greater than 2.
                 * Take care not to conflict with the meter color REG_C_x.
                 * If the available index REG_C_y >= REG_C_x, skip the
                 * color register.
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (uint32_t)(REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
                        if (config->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
                                return config->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
}
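
/*
 * Worked example (illustrative): with dv_xmeta_en == MLX5_XMETA_MODE_META16
 * the mapping above gives MLX5_METADATA_RX -> REG_C_0 and
 * MLX5_FLOW_MARK -> REG_C_1, while MLX5_METADATA_TX always lands on REG_A.
 * A hypothetical caller:
 *
 *   int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &error);
 *   if (reg < 0)
 *           return reg;
 */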

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;

        /*
         * Having an available reg_c can be regarded inclusively as supporting
         * the extensive flow metadata register, which could mean:
         * - metadata register copy action by modify header.
         * - 16 modify header actions are supported.
         * - reg_c's are preserved across different domains (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
        return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Get the lowest priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 *
 * @return
 *   The value of the lowest priority of the flow.
 */
uint32_t
mlx5_get_lowest_priority(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!attr->group && !attr->transfer)
                return priv->config.flow_prio - 2;
        return MLX5_NON_ROOT_FLOW_MAX_PRIO - 1;
}

/**
 * Calculate matcher priority of the flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Pointer to device flow rule attributes.
 * @param[in] subpriority
 *   The priority based on the items.
 * @return
 *   The matcher priority of the flow.
 */
uint16_t
mlx5_get_matcher_priority(struct rte_eth_dev *dev,
                          const struct rte_flow_attr *attr,
                          uint32_t subpriority)
{
        uint16_t priority = (uint16_t)attr->priority;
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!attr->group && !attr->transfer) {
                if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                        priority = priv->config.flow_prio - 1;
                return mlx5_os_flow_adjust_priority(dev, priority, subpriority);
        }
        if (attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)
                priority = MLX5_NON_ROOT_FLOW_MAX_PRIO;
        return priority * 3 + subpriority;
}
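
/*
 * Worked example: for a non-root table (attr->group != 0) with
 * attr->priority == 2 and subpriority == 1, the function above returns
 * 2 * 3 + 1 == 7. For the root table the result is delegated to
 * mlx5_os_flow_adjust_priority() instead.
 */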

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
                          bool range_accepted,
                          struct rte_flow_error *error)
{
        unsigned int i;

        MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "mask enables non supported"
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last && !range_accepted) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
                int ret;

                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
                        last[i] = ((const uint8_t *)item->last)[i] & mask[i];
                }
                ret = memcmp(spec, last, size);
                if (ret != 0)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "range is not valid");
        }
        return 0;
}
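
/*
 * Hypothetical usage sketch (not part of the driver): validating a user
 * VLAN item against a NIC capability mask that covers only the fields the
 * hardware can match on; callers pass the item's mask, or the default
 * rte_flow mask when item->mask is NULL.
 *
 *   static const struct rte_flow_item_vlan nic_mask = {
 *           .tci = RTE_BE16(UINT16_MAX),
 *           .inner_type = RTE_BE16(UINT16_MAX),
 *   };
 *   ret = mlx5_flow_item_acceptable(item,
 *                                   (const uint8_t *)
 *                                   (item->mask ? item->mask :
 *                                    &rte_flow_item_vlan_mask),
 *                                   (const uint8_t *)&nic_mask,
 *                                   sizeof(nic_mask), false, error);
 */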

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        int rss_request_inner = rss_desc->level >= 2;

        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
                hash_fields |= IBV_RX_HASH_INNER;
        else if (tunnel || rss_request_inner)
                return 0;
#endif
        /* Check if requested layer matches RSS hash fields. */
        if (!(rss_desc->types & layer_types))
                return 0;
        return hash_fields;
}
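
/*
 * Example (illustrative, when HAVE_IBV_DEVICE_TUNNEL_SUPPORT is defined):
 * for an inner-UDP hash on a tunneled flow, where rss_desc->level >= 2 and
 * rss_desc->types includes ETH_RSS_NONFRAG_IPV4_UDP, the call
 *
 *   mlx5_flow_hashfields_adjust(rss_desc, 1, ETH_RSS_NONFRAG_IPV4_UDP,
 *                               hash_fields);
 *
 * returns hash_fields | IBV_RX_HASH_INNER, while a mismatch between
 * rss_desc->types and layer_types returns 0 (no hashing on this layer).
 */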

/**
 * Look up and set the ptype in the Rx data part. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype is
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        unsigned int i;
        uint32_t tunnel_ptype = 0;

        /* Look up for the ptype to use. */
        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
                if (!rxq_ctrl->flow_tunnels_n[i])
                        continue;
                if (!tunnel_ptype) {
                        tunnel_ptype = tunnels_info[i].ptype;
                } else {
                        tunnel_ptype = 0;
                        break;
                }
        }
        rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
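
/*
 * Example: if a queue carries only VXLAN tunnel flows, its tunnel ptype
 * becomes RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP; as soon as a GRE flow
 * is added to the same queue, two counters are non-zero and the tunnel
 * ptype is cleared back to 0, since a single ptype can no longer describe
 * the queue.
 */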

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;

        if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
                struct mlx5_hrxq *hrxq;

                hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
                if (hrxq)
                        ind_tbl = hrxq->ind_table;
        } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                struct mlx5_shared_action_rss *shared_rss;

                shared_rss = mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
                         dev_handle->rix_srss);
                if (shared_rss)
                        ind_tbl = shared_rss->ind_tbl;
        }
        if (!ind_tbl)
                return;
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                /*
                 * To support metadata register copy on Tx loopback,
                 * this must always be enabled (metadata may arrive
                 * from another port, not only from local flows).
                 */
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n++;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_ind_table_obj *ind_tbl = NULL;
        unsigned int i;

        if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
                struct mlx5_hrxq *hrxq;

                hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
                if (hrxq)
                        ind_tbl = hrxq->ind_table;
        } else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
                struct mlx5_shared_action_rss *shared_rss;

                shared_rss = mlx5_ipool_get
                        (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
                         dev_handle->rix_srss);
                if (shared_rss)
                        ind_tbl = shared_rss->ind_tbl;
        }
        if (!ind_tbl)
                return;
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != ind_tbl->queues_n; ++i) {
                int idx = ind_tbl->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->flow_mark_n--;
                        rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl;
                unsigned int j;

                if (!(*priv->rxqs)[i])
                        continue;
                rxq_ctrl = container_of((*priv->rxqs)[i],
                                        struct mlx5_rxq_ctrl, rxq);
                rxq_ctrl->flow_mark_n = 0;
                rxq_ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq_ctrl->flow_tunnels_n[j] = 0;
                rxq_ctrl->rxq.tunnel = 0;
        }
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *data;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                if (!(*priv->rxqs)[i])
                        continue;
                data = (*priv->rxqs)[i];
                if (!rte_flow_dynf_metadata_avail()) {
                        data->dynf_meta = 0;
                        data->flow_meta_mask = 0;
                        data->flow_meta_offset = -1;
                        data->flow_meta_port_mask = 0;
                } else {
                        data->dynf_meta = 1;
                        data->flow_meta_mask = rte_flow_dynf_metadata_mask;
                        data->flow_meta_offset = rte_flow_dynf_metadata_offs;
                        data->flow_meta_port_mask = (uint32_t)~0;
                        if (priv->config.dv_xmeta_en == MLX5_XMETA_MODE_META16)
                                data->flow_meta_port_mask >>= 16;
                }
        }
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
                      enum rte_flow_action_type action)
{
        if (actions == NULL)
                return NULL;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                if (actions->type == action)
                        return actions;
        return NULL;
}
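
/*
 * Hypothetical usage sketch: picking the RSS action out of an action list
 * (NULL is returned when the action type is absent).
 *
 *   const struct rte_flow_action *rss_act =
 *           mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *   if (rss_act)
 *           rss_conf = rss_act->conf;
 */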
1290
1291 /*
1292  * Validate the flag action.
1293  *
1294  * @param[in] action_flags
1295  *   Bit-fields that holds the actions detected until now.
1296  * @param[in] attr
1297  *   Attributes of flow that includes this action.
1298  * @param[out] error
1299  *   Pointer to error structure.
1300  *
1301  * @return
1302  *   0 on success, a negative errno value otherwise and rte_errno is set.
1303  */
1304 int
1305 mlx5_flow_validate_action_flag(uint64_t action_flags,
1306                                const struct rte_flow_attr *attr,
1307                                struct rte_flow_error *error)
1308 {
1309         if (action_flags & MLX5_FLOW_ACTION_MARK)
1310                 return rte_flow_error_set(error, EINVAL,
1311                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1312                                           "can't mark and flag in same flow");
1313         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1314                 return rte_flow_error_set(error, EINVAL,
1315                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1316                                           "can't have 2 flag"
1317                                           " actions in same flow");
1318         if (attr->egress)
1319                 return rte_flow_error_set(error, ENOTSUP,
1320                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1321                                           "flag action not supported for "
1322                                           "egress");
1323         return 0;
1324 }
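
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * A FLAG action passes the checks above when the flow is ingress and no
 * FLAG or MARK action was seen before (action_flags == 0). The function
 * name and the hypothetical MLX5_FLOW_VALIDATION_EXAMPLES guard keep it
 * out of any real build.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_flag(void)
{
        struct rte_flow_error error;
        const struct rte_flow_attr attr = { .ingress = 1 };

        /* No prior action in this flow, hence action_flags == 0. */
        return mlx5_flow_validate_action_flag(0, &attr, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */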
1325
1326 /**
1327  * Validate the mark action.
1328  *
1329  * @param[in] action
1330  *   Pointer to the mark action.
1331  * @param[in] action_flags
1332  *   Bit-fields that hold the actions detected until now.
1333  * @param[in] attr
1334  *   Attributes of flow that includes this action.
1335  * @param[out] error
1336  *   Pointer to error structure.
1337  *
1338  * @return
1339  *   0 on success, a negative errno value otherwise and rte_errno is set.
1340  */
1341 int
1342 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1343                                uint64_t action_flags,
1344                                const struct rte_flow_attr *attr,
1345                                struct rte_flow_error *error)
1346 {
1347         const struct rte_flow_action_mark *mark = action->conf;
1348
1349         if (!mark)
1350                 return rte_flow_error_set(error, EINVAL,
1351                                           RTE_FLOW_ERROR_TYPE_ACTION,
1352                                           action,
1353                                           "configuration cannot be null");
1354         if (mark->id >= MLX5_FLOW_MARK_MAX)
1355                 return rte_flow_error_set(error, EINVAL,
1356                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1357                                           &mark->id,
1358                                           "mark id must be in 0 <= id < "
1359                                           RTE_STR(MLX5_FLOW_MARK_MAX));
1360         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1361                 return rte_flow_error_set(error, EINVAL,
1362                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1363                                           "can't flag and mark in same flow");
1364         if (action_flags & MLX5_FLOW_ACTION_MARK)
1365                 return rte_flow_error_set(error, EINVAL,
1366                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1367                                           "can't have 2 mark actions in same"
1368                                           " flow");
1369         if (attr->egress)
1370                 return rte_flow_error_set(error, ENOTSUP,
1371                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1372                                           "mark action not supported for "
1373                                           "egress");
1374         return 0;
1375 }
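
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * A MARK action that satisfies the validator above: non-NULL conf, id
 * below MLX5_FLOW_MARK_MAX, no previous FLAG/MARK action, ingress only.
 * All names and values here are hypothetical and compiled out.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_mark(void)
{
        struct rte_flow_error error;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_action_mark conf = { .id = 42 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MARK,
                .conf = &conf,
        };

        return mlx5_flow_validate_action_mark(&action, 0, &attr, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */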
1376
1377 /**
1378  * Validate the drop action.
1379  *
1380  * @param[in] action_flags
1381  *   Bit-fields that hold the actions detected until now.
1382  * @param[in] attr
1383  *   Attributes of flow that includes this action.
1384  * @param[out] error
1385  *   Pointer to error structure.
1386  *
1387  * @return
1388  *   0 on success, a negative errno value otherwise and rte_errno is set.
1389  */
1390 int
1391 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
1392                                const struct rte_flow_attr *attr,
1393                                struct rte_flow_error *error)
1394 {
1395         if (attr->egress)
1396                 return rte_flow_error_set(error, ENOTSUP,
1397                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1398                                           "drop action not supported for "
1399                                           "egress");
1400         return 0;
1401 }
1402
1403 /**
1404  * Validate the queue action.
1405  *
1406  * @param[in] action
1407  *   Pointer to the queue action.
1408  * @param[in] action_flags
1409  *   Bit-fields that hold the actions detected until now.
1410  * @param[in] dev
1411  *   Pointer to the Ethernet device structure.
1412  * @param[in] attr
1413  *   Attributes of flow that includes this action.
1414  * @param[out] error
1415  *   Pointer to error structure.
1416  *
1417  * @return
1418  *   0 on success, a negative errno value otherwise and rte_errno is set.
1419  */
1420 int
1421 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
1422                                 uint64_t action_flags,
1423                                 struct rte_eth_dev *dev,
1424                                 const struct rte_flow_attr *attr,
1425                                 struct rte_flow_error *error)
1426 {
1427         struct mlx5_priv *priv = dev->data->dev_private;
1428         const struct rte_flow_action_queue *queue = action->conf;
1429
1430         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1431                 return rte_flow_error_set(error, EINVAL,
1432                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1433                                           "can't have 2 fate actions in"
1434                                           " same flow");
1435         if (!priv->rxqs_n)
1436                 return rte_flow_error_set(error, EINVAL,
1437                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1438                                           NULL, "No Rx queues configured");
1439         if (queue->index >= priv->rxqs_n)
1440                 return rte_flow_error_set(error, EINVAL,
1441                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1442                                           &queue->index,
1443                                           "queue index out of range");
1444         if (!(*priv->rxqs)[queue->index])
1445                 return rte_flow_error_set(error, EINVAL,
1446                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1447                                           &queue->index,
1448                                           "queue is not configured");
1449         if (attr->egress)
1450                 return rte_flow_error_set(error, ENOTSUP,
1451                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1452                                           "queue action not supported for "
1453                                           "egress");
1454         return 0;
1455 }
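
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * A QUEUE fate action: it assumes the device has Rx queue 0 configured
 * and that no other fate action was seen yet (action_flags == 0). The
 * function name and MLX5_FLOW_VALIDATION_EXAMPLES guard are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_queue(struct rte_eth_dev *dev)
{
        struct rte_flow_error error;
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_action_queue conf = { .index = 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &conf,
        };

        return mlx5_flow_validate_action_queue(&action, 0, dev, &attr,
                                               &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */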
1456
1457 /**
1458  * Validate the RSS action configuration.
1459  *
1460  * @param[in] dev
1461  *   Pointer to the Ethernet device structure.
1462  * @param[in] action
1463  *   Pointer to the RSS action.
1464  * @param[out] error
1465  *   Pointer to error structure.
1466  *
1467  * @return
1468  *   0 on success, a negative errno value otherwise and rte_errno is set.
1469  */
1470 int
1471 mlx5_validate_action_rss(struct rte_eth_dev *dev,
1472                          const struct rte_flow_action *action,
1473                          struct rte_flow_error *error)
1474 {
1475         struct mlx5_priv *priv = dev->data->dev_private;
1476         const struct rte_flow_action_rss *rss = action->conf;
1477         enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
1478         unsigned int i;
1479
1480         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1481             rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1482                 return rte_flow_error_set(error, ENOTSUP,
1483                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1484                                           &rss->func,
1485                                           "RSS hash function not supported");
1486 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1487         if (rss->level > 2)
1488 #else
1489         if (rss->level > 1)
1490 #endif
1491                 return rte_flow_error_set(error, ENOTSUP,
1492                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1493                                           &rss->level,
1494                                           "tunnel RSS is not supported");
1495         /* Allow RSS key_len 0 in case of NULL (default) RSS key. */
1496         if (rss->key_len == 0 && rss->key != NULL)
1497                 return rte_flow_error_set(error, ENOTSUP,
1498                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1499                                           &rss->key_len,
1500                                           "RSS hash key length 0");
1501         if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1502                 return rte_flow_error_set(error, ENOTSUP,
1503                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1504                                           &rss->key_len,
1505                                           "RSS hash key too small");
1506         if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1507                 return rte_flow_error_set(error, ENOTSUP,
1508                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1509                                           &rss->key_len,
1510                                           "RSS hash key too large");
1511         if (rss->queue_num > priv->config.ind_table_max_size)
1512                 return rte_flow_error_set(error, ENOTSUP,
1513                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1514                                           &rss->queue_num,
1515                                           "number of queues too large");
1516         if (rss->types & MLX5_RSS_HF_MASK)
1517                 return rte_flow_error_set(error, ENOTSUP,
1518                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1519                                           &rss->types,
1520                                           "some RSS protocols are not"
1521                                           " supported");
1522         if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
1523             !(rss->types & ETH_RSS_IP))
1524                 return rte_flow_error_set(error, EINVAL,
1525                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1526                                           "L3 partial RSS requested but L3 RSS"
1527                                           " type not specified");
1528         if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
1529             !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
1530                 return rte_flow_error_set(error, EINVAL,
1531                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1532                                           "L4 partial RSS requested but L4 RSS"
1533                                           " type not specified");
1534         if (!priv->rxqs_n)
1535                 return rte_flow_error_set(error, EINVAL,
1536                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1537                                           NULL, "No Rx queues configured");
1538         if (!rss->queue_num)
1539                 return rte_flow_error_set(error, EINVAL,
1540                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1541                                           NULL, "No queues configured");
1542         for (i = 0; i != rss->queue_num; ++i) {
1543                 struct mlx5_rxq_ctrl *rxq_ctrl;
1544
1545                 if (rss->queue[i] >= priv->rxqs_n)
1546                         return rte_flow_error_set
1547                                 (error, EINVAL,
1548                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1549                                  &rss->queue[i], "queue index out of range");
1550                 if (!(*priv->rxqs)[rss->queue[i]])
1551                         return rte_flow_error_set
1552                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1553                                  &rss->queue[i], "queue is not configured");
1554                 rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
1555                                         struct mlx5_rxq_ctrl, rxq);
1556                 if (i == 0)
1557                         rxq_type = rxq_ctrl->type;
1558                 if (rxq_type != rxq_ctrl->type)
1559                         return rte_flow_error_set
1560                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1561                                  &rss->queue[i],
1562                                  "combining hairpin and regular RSS queues is not supported");
1563         }
1564         return 0;
1565 }
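
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * An RSS configuration accepted by mlx5_validate_action_rss(): Toeplitz
 * hashing, a NULL key with key_len 0 (device default key), outer level
 * and existing queue indexes. Queue indexes 0/1 being configured is an
 * assumption; the whole block is guarded by the hypothetical
 * MLX5_FLOW_VALIDATION_EXAMPLES macro.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_rss(struct rte_eth_dev *dev)
{
        struct rte_flow_error error;
        static const uint16_t queues[] = { 0, 1 };
        const struct rte_flow_action_rss conf = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .level = 1,          /* Outer packet, no tunnel decap. */
                .types = ETH_RSS_IP,
                .key_len = 0,        /* NULL key: device default is used. */
                .key = NULL,
                .queue_num = RTE_DIM(queues),
                .queue = queues,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &conf,
        };

        return mlx5_validate_action_rss(dev, &action, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */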
1566
1567 /**
1568  * Validate the RSS action in the context of a complete flow.
1569  *
1570  * @param[in] action
1571  *   Pointer to the RSS action.
1572  * @param[in] action_flags
1573  *   Bit-fields that hold the actions detected until now.
1574  * @param[in] dev
1575  *   Pointer to the Ethernet device structure.
1576  * @param[in] attr
1577  *   Attributes of flow that includes this action.
1578  * @param[in] item_flags
1579  *   Items that were detected.
1580  * @param[out] error
1581  *   Pointer to error structure.
1582  *
1583  * @return
1584  *   0 on success, a negative errno value otherwise and rte_errno is set.
1585  */
1586 int
1587 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
1588                               uint64_t action_flags,
1589                               struct rte_eth_dev *dev,
1590                               const struct rte_flow_attr *attr,
1591                               uint64_t item_flags,
1592                               struct rte_flow_error *error)
1593 {
1594         const struct rte_flow_action_rss *rss = action->conf;
1595         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1596         int ret;
1597
1598         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1599                 return rte_flow_error_set(error, EINVAL,
1600                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1601                                           "can't have 2 fate actions"
1602                                           " in same flow");
1603         ret = mlx5_validate_action_rss(dev, action, error);
1604         if (ret)
1605                 return ret;
1606         if (attr->egress)
1607                 return rte_flow_error_set(error, ENOTSUP,
1608                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1609                                           "rss action not supported for "
1610                                           "egress");
1611         if (rss->level > 1 && !tunnel)
1612                 return rte_flow_error_set(error, EINVAL,
1613                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1614                                           "inner RSS is not supported for "
1615                                           "non-tunnel flows");
1616         if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
1617             !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
1618                 return rte_flow_error_set(error, EINVAL,
1619                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1620                                           "RSS on eCPRI is not supported yet");
1621         }
1622         return 0;
1623 }
1624
1625 /**
1626  * Validate the default miss action.
1627  *
1628  * @param[in] action_flags
1629  *   Bit-fields that hold the actions detected until now.
      * @param[in] attr
      *   Attributes of flow that includes this action.
1630  * @param[out] error
1631  *   Pointer to error structure.
1632  *
1633  * @return
1634  *   0 on success, a negative errno value otherwise and rte_errno is set.
1635  */
1636 int
1637 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
1638                                 const struct rte_flow_attr *attr,
1639                                 struct rte_flow_error *error)
1640 {
1641         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1642                 return rte_flow_error_set(error, EINVAL,
1643                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1644                                           "can't have 2 fate actions in"
1645                                           " same flow");
1646         if (attr->egress)
1647                 return rte_flow_error_set(error, ENOTSUP,
1648                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1649                                           "default miss action not supported "
1650                                           "for egress");
1651         if (attr->group)
1652                 return rte_flow_error_set(error, ENOTSUP,
1653                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1654                                           "only group 0 is supported");
1655         if (attr->transfer)
1656                 return rte_flow_error_set(error, ENOTSUP,
1657                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1658                                           NULL, "transfer is not supported");
1659         return 0;
1660 }
1661
1662 /**
1663  * Validate the count action.
1664  *
1665  * @param[in] dev
1666  *   Pointer to the Ethernet device structure.
1667  * @param[in] attr
1668  *   Attributes of flow that includes this action.
1669  * @param[out] error
1670  *   Pointer to error structure.
1671  *
1672  * @return
1673  *   0 on success, a negative errno value otherwise and rte_errno is set.
1674  */
1675 int
1676 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1677                                 const struct rte_flow_attr *attr,
1678                                 struct rte_flow_error *error)
1679 {
1680         if (attr->egress)
1681                 return rte_flow_error_set(error, ENOTSUP,
1682                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1683                                           "count action not supported for "
1684                                           "egress");
1685         return 0;
1686 }
1687
1688 /**
1689  * Verify the @p attributes will be correctly understood by the NIC and store
1690  * them in the @p flow if everything is correct.
1691  *
1692  * @param[in] dev
1693  *   Pointer to the Ethernet device structure.
1694  * @param[in] attributes
1695  *   Pointer to flow attributes
1696  * @param[out] error
1697  *   Pointer to error structure.
1698  *
1699  * @return
1700  *   0 on success, a negative errno value otherwise and rte_errno is set.
1701  */
1702 int
1703 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1704                               const struct rte_flow_attr *attributes,
1705                               struct rte_flow_error *error)
1706 {
1707         struct mlx5_priv *priv = dev->data->dev_private;
1708         uint32_t priority_max = priv->config.flow_prio - 1;
1709
1710         if (attributes->group)
1711                 return rte_flow_error_set(error, ENOTSUP,
1712                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1713                                           NULL, "groups are not supported");
1714         if (attributes->priority != MLX5_FLOW_LOWEST_PRIO_INDICATOR &&
1715             attributes->priority >= priority_max)
1716                 return rte_flow_error_set(error, ENOTSUP,
1717                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1718                                           NULL, "priority out of range");
1719         if (attributes->egress)
1720                 return rte_flow_error_set(error, ENOTSUP,
1721                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1722                                           "egress is not supported");
1723         if (attributes->transfer && !priv->config.dv_esw_en)
1724                 return rte_flow_error_set(error, ENOTSUP,
1725                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1726                                           NULL, "transfer is not supported");
1727         if (!attributes->ingress)
1728                 return rte_flow_error_set(error, EINVAL,
1729                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1730                                           NULL,
1731                                           "ingress attribute is mandatory");
1732         return 0;
1733 }
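
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * The only attribute combination this generic validator accepts is plain
 * ingress on group 0 with an in-range priority (transfer additionally
 * needs dv_esw_en). Name and guard macro are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_attr(struct rte_eth_dev *dev)
{
        struct rte_flow_error error;
        const struct rte_flow_attr attr = {
                .group = 0,     /* Non-zero groups are rejected here. */
                .priority = 0,  /* Must stay below priv->config.flow_prio. */
                .ingress = 1,   /* Egress is rejected outright. */
        };

        return mlx5_flow_validate_attributes(dev, &attr, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */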
1734
1735 /**
1736  * Validate ICMP6 item.
1737  *
1738  * @param[in] item
1739  *   Item specification.
1740  * @param[in] item_flags
1741  *   Bit-fields that hold the items detected until now.
1742  * @param[in] target_protocol
1743  *   The next protocol in the previous item.
1744  * @param[out] error
1745  *   Pointer to error structure.
1746  *
1747  * @return
1748  *   0 on success, a negative errno value otherwise and rte_errno is set.
1749  */
1750 int
1751 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1752                                uint64_t item_flags,
1753                                uint8_t target_protocol,
1754                                struct rte_flow_error *error)
1755 {
1756         const struct rte_flow_item_icmp6 *mask = item->mask;
1757         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1758         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1759                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1760         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1761                                       MLX5_FLOW_LAYER_OUTER_L4;
1762         int ret;
1763
1764         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1765                 return rte_flow_error_set(error, EINVAL,
1766                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1767                                           "protocol filtering not compatible"
1768                                           " with ICMP6 layer");
1769         if (!(item_flags & l3m))
1770                 return rte_flow_error_set(error, EINVAL,
1771                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1772                                           "IPv6 is mandatory to filter on"
1773                                           " ICMP6");
1774         if (item_flags & l4m)
1775                 return rte_flow_error_set(error, EINVAL,
1776                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1777                                           "multiple L4 layers not supported");
1778         if (!mask)
1779                 mask = &rte_flow_item_icmp6_mask;
1780         ret = mlx5_flow_item_acceptable
1781                 (item, (const uint8_t *)mask,
1782                  (const uint8_t *)&rte_flow_item_icmp6_mask,
1783                  sizeof(struct rte_flow_item_icmp6),
1784                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1785         if (ret < 0)
1786                 return ret;
1787         return 0;
1788 }
1789
1790 /**
1791  * Validate ICMP item.
1792  *
1793  * @param[in] item
1794  *   Item specification.
1795  * @param[in] item_flags
1796  *   Bit-fields that hold the items detected until now.
      * @param[in] target_protocol
      *   The next protocol in the previous item.
1797  * @param[out] error
1798  *   Pointer to error structure.
1799  *
1800  * @return
1801  *   0 on success, a negative errno value otherwise and rte_errno is set.
1802  */
1803 int
1804 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1805                              uint64_t item_flags,
1806                              uint8_t target_protocol,
1807                              struct rte_flow_error *error)
1808 {
1809         const struct rte_flow_item_icmp *mask = item->mask;
1810         const struct rte_flow_item_icmp nic_mask = {
1811                 .hdr.icmp_type = 0xff,
1812                 .hdr.icmp_code = 0xff,
1813                 .hdr.icmp_ident = RTE_BE16(0xffff),
1814                 .hdr.icmp_seq_nb = RTE_BE16(0xffff),
1815         };
1816         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1817         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1818                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1819         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1820                                       MLX5_FLOW_LAYER_OUTER_L4;
1821         int ret;
1822
1823         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1824                 return rte_flow_error_set(error, EINVAL,
1825                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1826                                           "protocol filtering not compatible"
1827                                           " with ICMP layer");
1828         if (!(item_flags & l3m))
1829                 return rte_flow_error_set(error, EINVAL,
1830                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1831                                           "IPv4 is mandatory to filter"
1832                                           " on ICMP");
1833         if (item_flags & l4m)
1834                 return rte_flow_error_set(error, EINVAL,
1835                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1836                                           "multiple L4 layers not supported");
1837         if (!mask)
1838                 mask = &nic_mask;
1839         ret = mlx5_flow_item_acceptable
1840                 (item, (const uint8_t *)mask,
1841                  (const uint8_t *)&nic_mask,
1842                  sizeof(struct rte_flow_item_icmp),
1843                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1844         if (ret < 0)
1845                 return ret;
1846         return 0;
1847 }
1848
1849 /**
1850  * Validate Ethernet item.
1851  *
1852  * @param[in] item
1853  *   Item specification.
1854  * @param[in] item_flags
1855  *   Bit-fields that hold the items detected until now.
      * @param[in] ext_vlan_sup
      *   Whether extended VLAN features are supported or not.
1856  * @param[out] error
1857  *   Pointer to error structure.
1858  *
1859  * @return
1860  *   0 on success, a negative errno value otherwise and rte_errno is set.
1861  */
1862 int
1863 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1864                             uint64_t item_flags, bool ext_vlan_sup,
1865                             struct rte_flow_error *error)
1866 {
1867         const struct rte_flow_item_eth *mask = item->mask;
1868         const struct rte_flow_item_eth nic_mask = {
1869                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1870                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1871                 .type = RTE_BE16(0xffff),
1872                 .has_vlan = ext_vlan_sup ? 1 : 0,
1873         };
1874         int ret;
1875         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1876         const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1877                                        MLX5_FLOW_LAYER_OUTER_L2;
1878
1879         if (item_flags & ethm)
1880                 return rte_flow_error_set(error, ENOTSUP,
1881                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1882                                           "multiple L2 layers not supported");
1883         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1884             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1885                 return rte_flow_error_set(error, EINVAL,
1886                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1887                                           "L2 layer should not follow "
1888                                           "L3 layers");
1889         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1890             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1891                 return rte_flow_error_set(error, EINVAL,
1892                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1893                                           "L2 layer should not follow VLAN");
1894         if (!mask)
1895                 mask = &rte_flow_item_eth_mask;
1896         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1897                                         (const uint8_t *)&nic_mask,
1898                                         sizeof(struct rte_flow_item_eth),
1899                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1900         return ret;
1901 }
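
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * An outer Ethernet item matching the IPv4 ethertype; item_flags == 0
 * because this is the first pattern item. Function name and guard macro
 * MLX5_FLOW_VALIDATION_EXAMPLES are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_eth(void)
{
        struct rte_flow_error error;
        const struct rte_flow_item_eth spec = {
                .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
        };
        const struct rte_flow_item_eth mask = {
                .type = RTE_BE16(0xffff),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &spec,
                .mask = &mask,
        };

        return mlx5_flow_validate_item_eth(&item, 0, false, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */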
1902
1903 /**
1904  * Validate VLAN item.
1905  *
1906  * @param[in] item
1907  *   Item specification.
1908  * @param[in] item_flags
1909  *   Bit-fields that hold the items detected until now.
1910  * @param[in] dev
1911  *   Ethernet device flow is being created on.
1912  * @param[out] error
1913  *   Pointer to error structure.
1914  *
1915  * @return
1916  *   0 on success, a negative errno value otherwise and rte_errno is set.
1917  */
1918 int
1919 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1920                              uint64_t item_flags,
1921                              struct rte_eth_dev *dev,
1922                              struct rte_flow_error *error)
1923 {
1924         const struct rte_flow_item_vlan *spec = item->spec;
1925         const struct rte_flow_item_vlan *mask = item->mask;
1926         const struct rte_flow_item_vlan nic_mask = {
1927                 .tci = RTE_BE16(UINT16_MAX),
1928                 .inner_type = RTE_BE16(UINT16_MAX),
1929         };
1930         uint16_t vlan_tag = 0;
1931         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1932         int ret;
1933         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1934                                         MLX5_FLOW_LAYER_INNER_L4) :
1935                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1936                                         MLX5_FLOW_LAYER_OUTER_L4);
1937         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1938                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1939
1940         if (item_flags & vlanm)
1941                 return rte_flow_error_set(error, EINVAL,
1942                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1943                                           "multiple VLAN layers not supported");
1944         else if ((item_flags & l34m) != 0)
1945                 return rte_flow_error_set(error, EINVAL,
1946                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1947                                           "VLAN cannot follow L3/L4 layer");
1948         if (!mask)
1949                 mask = &rte_flow_item_vlan_mask;
1950         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1951                                         (const uint8_t *)&nic_mask,
1952                                         sizeof(struct rte_flow_item_vlan),
1953                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1954         if (ret)
1955                 return ret;
1956         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1957                 struct mlx5_priv *priv = dev->data->dev_private;
1958
1959                 if (priv->vmwa_context) {
1960                         /*
1961                          * A non-NULL context means we run in a virtual
1962                          * machine with SR-IOV enabled and must create a
1963                          * VLAN interface to make the hypervisor set up the
1964                          * E-Switch vport context correctly. We avoid creating
1965                          * multiple VLAN interfaces, so VLAN tag masks are not supported.
1966                          */
1967                         return rte_flow_error_set(error, EINVAL,
1968                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1969                                                   item,
1970                                                   "VLAN tag mask is not"
1971                                                   " supported in virtual"
1972                                                   " environment");
1973                 }
1974         }
1975         if (spec) {
1976                 vlan_tag = spec->tci;
1977                 vlan_tag &= mask->tci;
1978         }
1979         /*
1980          * From the verbs perspective, an empty VLAN is equivalent
1981          * to a packet without a VLAN layer.
1982          */
1983         if (!vlan_tag)
1984                 return rte_flow_error_set(error, EINVAL,
1985                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1986                                           item->spec,
1987                                           "VLAN cannot be empty");
1988         return 0;
1989 }
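
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * A VLAN item that passes the validation above: it follows an outer
 * Ethernet layer, keeps the standard 0x0fff TCI mask (so the SR-IOV
 * workaround check is not triggered) and matches a non-empty tag. All
 * values and the guard macro are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_vlan(struct rte_eth_dev *dev)
{
        struct rte_flow_error error;
        /* The TCI must not be fully masked out: empty VLANs are rejected. */
        const struct rte_flow_item_vlan spec = { .tci = RTE_BE16(0x0123) };
        const struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .spec = &spec,
                .mask = &mask,
        };

        /* An outer Ethernet layer has already been matched. */
        return mlx5_flow_validate_item_vlan(&item, MLX5_FLOW_LAYER_OUTER_L2,
                                            dev, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */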
1990
1991 /**
1992  * Validate IPV4 item.
1993  *
1994  * @param[in] item
1995  *   Item specification.
1996  * @param[in] item_flags
1997  *   Bit-fields that hold the items detected until now.
1998  * @param[in] last_item
1999  *   Previous validated item in the pattern items.
2000  * @param[in] ether_type
2001  *   Type in the ethernet layer header (including dot1q).
2002  * @param[in] acc_mask
2003  *   Acceptable mask, if NULL default internal default mask
2004  *   will be used to check whether item fields are supported.
2005  * @param[in] range_accepted
2006  *   True if range of values is accepted for specific fields, false otherwise.
2007  * @param[out] error
2008  *   Pointer to error structure.
2009  *
2010  * @return
2011  *   0 on success, a negative errno value otherwise and rte_errno is set.
2012  */
2013 int
2014 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2015                              uint64_t item_flags,
2016                              uint64_t last_item,
2017                              uint16_t ether_type,
2018                              const struct rte_flow_item_ipv4 *acc_mask,
2019                              bool range_accepted,
2020                              struct rte_flow_error *error)
2021 {
2022         const struct rte_flow_item_ipv4 *mask = item->mask;
2023         const struct rte_flow_item_ipv4 *spec = item->spec;
2024         const struct rte_flow_item_ipv4 nic_mask = {
2025                 .hdr = {
2026                         .src_addr = RTE_BE32(0xffffffff),
2027                         .dst_addr = RTE_BE32(0xffffffff),
2028                         .type_of_service = 0xff,
2029                         .next_proto_id = 0xff,
2030                 },
2031         };
2032         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2033         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2034                                       MLX5_FLOW_LAYER_OUTER_L3;
2035         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2036                                       MLX5_FLOW_LAYER_OUTER_L4;
2037         int ret;
2038         uint8_t next_proto = 0xFF;
2039         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2040                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2041                                   MLX5_FLOW_LAYER_INNER_VLAN);
2042
2043         if ((last_item & l2_vlan) && ether_type &&
2044             ether_type != RTE_ETHER_TYPE_IPV4)
2045                 return rte_flow_error_set(error, EINVAL,
2046                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2047                                           "IPv4 cannot follow L2/VLAN layer "
2048                                           "whose ether type is not IPv4");
2049         if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2050                 if (mask && spec)
2051                         next_proto = mask->hdr.next_proto_id &
2052                                      spec->hdr.next_proto_id;
2053                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2054                         return rte_flow_error_set(error, EINVAL,
2055                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2056                                                   item,
2057                                                   "multiple tunnel "
2058                                                   "not supported");
2059         }
2060         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2061                 return rte_flow_error_set(error, EINVAL,
2062                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2063                                           "wrong tunnel type - IPv6 specified "
2064                                           "but IPv4 item provided");
2065         if (item_flags & l3m)
2066                 return rte_flow_error_set(error, ENOTSUP,
2067                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2068                                           "multiple L3 layers not supported");
2069         else if (item_flags & l4m)
2070                 return rte_flow_error_set(error, EINVAL,
2071                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2072                                           "L3 cannot follow an L4 layer.");
2073         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2074                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2075                 return rte_flow_error_set(error, EINVAL,
2076                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2077                                           "L3 cannot follow an NVGRE layer.");
2078         if (!mask)
2079                 mask = &rte_flow_item_ipv4_mask;
2080         else if (mask->hdr.next_proto_id != 0 &&
2081                  mask->hdr.next_proto_id != 0xff)
2082                 return rte_flow_error_set(error, EINVAL,
2083                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2084                                           "partial mask is not supported"
2085                                           " for protocol");
2086         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2087                                         acc_mask ? (const uint8_t *)acc_mask
2088                                                  : (const uint8_t *)&nic_mask,
2089                                         sizeof(struct rte_flow_item_ipv4),
2090                                         range_accepted, error);
2091         if (ret < 0)
2092                 return ret;
2093         return 0;
2094 }
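
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * An outer IPv4 item following an Ethernet item: acc_mask == NULL selects
 * the validator's internal nic_mask and ranges are not accepted. The
 * address, function name and guard macro are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_ipv4(void)
{
        struct rte_flow_error error;
        const struct rte_flow_item_ipv4 spec = {
                .hdr.dst_addr = RTE_BE32(0xc0a80101), /* 192.168.1.1 */
        };
        const struct rte_flow_item_ipv4 mask = {
                .hdr.dst_addr = RTE_BE32(0xffffffff),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &spec,
                .mask = &mask,
        };

        /* The previous (last) item was the outer Ethernet layer. */
        return mlx5_flow_validate_item_ipv4(&item,
                                            MLX5_FLOW_LAYER_OUTER_L2,
                                            MLX5_FLOW_LAYER_OUTER_L2,
                                            RTE_ETHER_TYPE_IPV4,
                                            NULL, false, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */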
2095
2096 /**
2097  * Validate IPV6 item.
2098  *
2099  * @param[in] item
2100  *   Item specification.
2101  * @param[in] item_flags
2102  *   Bit-fields that hold the items detected until now.
2103  * @param[in] last_item
2104  *   Previous validated item in the pattern items.
2105  * @param[in] ether_type
2106  *   Type in the ethernet layer header (including dot1q).
2107  * @param[in] acc_mask
2108  *   Acceptable mask, if NULL default internal default mask
2109  *   will be used to check whether item fields are supported.
2110  * @param[out] error
2111  *   Pointer to error structure.
2112  *
2113  * @return
2114  *   0 on success, a negative errno value otherwise and rte_errno is set.
2115  */
2116 int
2117 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2118                              uint64_t item_flags,
2119                              uint64_t last_item,
2120                              uint16_t ether_type,
2121                              const struct rte_flow_item_ipv6 *acc_mask,
2122                              struct rte_flow_error *error)
2123 {
2124         const struct rte_flow_item_ipv6 *mask = item->mask;
2125         const struct rte_flow_item_ipv6 *spec = item->spec;
2126         const struct rte_flow_item_ipv6 nic_mask = {
2127                 .hdr = {
2128                         .src_addr =
2129                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2130                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2131                         .dst_addr =
2132                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2133                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2134                         .vtc_flow = RTE_BE32(0xffffffff),
2135                         .proto = 0xff,
2136                 },
2137         };
2138         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2139         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2140                                       MLX5_FLOW_LAYER_OUTER_L3;
2141         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2142                                       MLX5_FLOW_LAYER_OUTER_L4;
2143         int ret;
2144         uint8_t next_proto = 0xFF;
2145         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2146                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2147                                   MLX5_FLOW_LAYER_INNER_VLAN);
2148
2149         if ((last_item & l2_vlan) && ether_type &&
2150             ether_type != RTE_ETHER_TYPE_IPV6)
2151                 return rte_flow_error_set(error, EINVAL,
2152                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2153                                           "IPv6 cannot follow L2/VLAN layer "
2154                                           "whose ether type is not IPv6");
2155         if (mask && mask->hdr.proto == UINT8_MAX && spec)
2156                 next_proto = spec->hdr.proto;
2157         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2158                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2159                         return rte_flow_error_set(error, EINVAL,
2160                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2161                                                   item,
2162                                                   "multiple tunnel "
2163                                                   "not supported");
2164         }
2165         if (next_proto == IPPROTO_HOPOPTS  ||
2166             next_proto == IPPROTO_ROUTING  ||
2167             next_proto == IPPROTO_FRAGMENT ||
2168             next_proto == IPPROTO_ESP      ||
2169             next_proto == IPPROTO_AH       ||
2170             next_proto == IPPROTO_DSTOPTS)
2171                 return rte_flow_error_set(error, EINVAL,
2172                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2173                                           "IPv6 proto (next header) should "
2174                                           "not be set to an extension header");
2175         if (item_flags & MLX5_FLOW_LAYER_IPIP)
2176                 return rte_flow_error_set(error, EINVAL,
2177                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2178                                           "wrong tunnel type - IPv4 specified "
2179                                           "but IPv6 item provided");
2180         if (item_flags & l3m)
2181                 return rte_flow_error_set(error, ENOTSUP,
2182                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2183                                           "multiple L3 layers not supported");
2184         else if (item_flags & l4m)
2185                 return rte_flow_error_set(error, EINVAL,
2186                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2187                                           "L3 cannot follow an L4 layer.");
2188         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2189                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2190                 return rte_flow_error_set(error, EINVAL,
2191                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2192                                           "L3 cannot follow an NVGRE layer.");
2193         if (!mask)
2194                 mask = &rte_flow_item_ipv6_mask;
2195         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2196                                         acc_mask ? (const uint8_t *)acc_mask
2197                                                  : (const uint8_t *)&nic_mask,
2198                                         sizeof(struct rte_flow_item_ipv6),
2199                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2200         if (ret < 0)
2201                 return ret;
2202         return 0;
2203 }
2204
2205 /**
2206  * Validate UDP item.
2207  *
2208  * @param[in] item
2209  *   Item specification.
2210  * @param[in] item_flags
2211  *   Bit-fields that hold the items detected until now.
2212  * @param[in] target_protocol
2213  *   The next protocol in the previous item.
2216  * @param[out] error
2217  *   Pointer to error structure.
2218  *
2219  * @return
2220  *   0 on success, a negative errno value otherwise and rte_errno is set.
2221  */
2222 int
2223 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2224                             uint64_t item_flags,
2225                             uint8_t target_protocol,
2226                             struct rte_flow_error *error)
2227 {
2228         const struct rte_flow_item_udp *mask = item->mask;
2229         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2230         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2231                                       MLX5_FLOW_LAYER_OUTER_L3;
2232         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2233                                       MLX5_FLOW_LAYER_OUTER_L4;
2234         int ret;
2235
2236         if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2237                 return rte_flow_error_set(error, EINVAL,
2238                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2239                                           "protocol filtering not compatible"
2240                                           " with UDP layer");
2241         if (!(item_flags & l3m))
2242                 return rte_flow_error_set(error, EINVAL,
2243                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2244                                           "L3 is mandatory to filter on L4");
2245         if (item_flags & l4m)
2246                 return rte_flow_error_set(error, EINVAL,
2247                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2248                                           "multiple L4 layers not supported");
2249         if (!mask)
2250                 mask = &rte_flow_item_udp_mask;
2251         ret = mlx5_flow_item_acceptable
2252                 (item, (const uint8_t *)mask,
2253                  (const uint8_t *)&rte_flow_item_udp_mask,
2254                  sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2255                  error);
2256         if (ret < 0)
2257                 return ret;
2258         return 0;
2259 }
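
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * An outer UDP item on top of an already-matched IPv4 layer;
 * target_protocol is 0xff when the L3 item does not constrain the next
 * protocol. Names, values and the guard macro are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_udp(void)
{
        struct rte_flow_error error;
        const struct rte_flow_item_udp spec = {
                .hdr.dst_port = RTE_BE16(4789), /* IANA VXLAN port. */
        };
        const struct rte_flow_item_udp mask = {
                .hdr.dst_port = RTE_BE16(0xffff),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .spec = &spec,
                .mask = &mask,
        };

        return mlx5_flow_validate_item_udp(&item,
                                           MLX5_FLOW_LAYER_OUTER_L3_IPV4,
                                           0xff, &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */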
2260
2261 /**
2262  * Validate TCP item.
2263  *
2264  * @param[in] item
2265  *   Item specification.
2266  * @param[in] item_flags
2267  *   Bit-fields that hold the items detected until now.
2268  * @param[in] target_protocol
2269  *   The next protocol in the previous item.
      * @param[in] flow_mask
      *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2270  * @param[out] error
2271  *   Pointer to error structure.
2272  *
2273  * @return
2274  *   0 on success, a negative errno value otherwise and rte_errno is set.
2275  */
2276 int
2277 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2278                             uint64_t item_flags,
2279                             uint8_t target_protocol,
2280                             const struct rte_flow_item_tcp *flow_mask,
2281                             struct rte_flow_error *error)
2282 {
2283         const struct rte_flow_item_tcp *mask = item->mask;
2284         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2285         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2286                                       MLX5_FLOW_LAYER_OUTER_L3;
2287         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2288                                       MLX5_FLOW_LAYER_OUTER_L4;
2289         int ret;
2290
2291         MLX5_ASSERT(flow_mask);
2292         if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2293                 return rte_flow_error_set(error, EINVAL,
2294                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2295                                           "protocol filtering not compatible"
2296                                           " with TCP layer");
2297         if (!(item_flags & l3m))
2298                 return rte_flow_error_set(error, EINVAL,
2299                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2300                                           "L3 is mandatory to filter on L4");
2301         if (item_flags & l4m)
2302                 return rte_flow_error_set(error, EINVAL,
2303                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2304                                           "multiple L4 layers not supported");
2305         if (!mask)
2306                 mask = &rte_flow_item_tcp_mask;
2307         ret = mlx5_flow_item_acceptable
2308                 (item, (const uint8_t *)mask,
2309                  (const uint8_t *)flow_mask,
2310                  sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2311                  error);
2312         if (ret < 0)
2313                 return ret;
2314         return 0;
2315 }
2316
2317 /**
2318  * Validate VXLAN item.
2319  *
2320  * @param[in] item
2321  *   Item specification.
2322  * @param[in] item_flags
2323  *   Bit-fields that hold the items detected until now.
2326  * @param[out] error
2327  *   Pointer to error structure.
2328  *
2329  * @return
2330  *   0 on success, a negative errno value otherwise and rte_errno is set.
2331  */
2332 int
2333 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2334                               uint64_t item_flags,
2335                               struct rte_flow_error *error)
2336 {
2337         const struct rte_flow_item_vxlan *spec = item->spec;
2338         const struct rte_flow_item_vxlan *mask = item->mask;
2339         int ret;
2340         union vni {
2341                 uint32_t vlan_id;
2342                 uint8_t vni[4];
2343         } id = { .vlan_id = 0, };
2344
2346         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2347                 return rte_flow_error_set(error, ENOTSUP,
2348                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2349                                           "multiple tunnel layers not"
2350                                           " supported");
2351         /*
2352          * Verify only UDPv4 is present as defined in
2353          * https://tools.ietf.org/html/rfc7348
2354          */
2355         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2356                 return rte_flow_error_set(error, EINVAL,
2357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2358                                           "no outer UDP layer found");
2359         if (!mask)
2360                 mask = &rte_flow_item_vxlan_mask;
2361         ret = mlx5_flow_item_acceptable
2362                 (item, (const uint8_t *)mask,
2363                  (const uint8_t *)&rte_flow_item_vxlan_mask,
2364                  sizeof(struct rte_flow_item_vxlan),
2365                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2366         if (ret < 0)
2367                 return ret;
2368         if (spec) {
2369                 memcpy(&id.vni[1], spec->vni, 3);
                     /* Keep only the VNI bits covered by the mask; the
                      * original second memcpy overwrote the spec bytes.
                      */
2370                 id.vni[1] &= mask->vni[0];
                     id.vni[2] &= mask->vni[1];
                     id.vni[3] &= mask->vni[2];
2371         }
2372         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2373                 return rte_flow_error_set(error, ENOTSUP,
2374                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2375                                           "VXLAN tunnel must be fully defined");
2376         return 0;
2377 }
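
/*
 * Editor's note -- illustrative sketch, not part of the driver sources.
 * A VXLAN item: the validator requires the full outer L2/L3/UDP stack,
 * so item_flags carries all outer layers matched so far. VNI value,
 * function name and guard macro are hypothetical.
 */
#ifdef MLX5_FLOW_VALIDATION_EXAMPLES
static int
example_validate_vxlan(void)
{
        struct rte_flow_error error;
        const struct rte_flow_item_vxlan spec = {
                .vni = { 0x00, 0x00, 0x2a }, /* VNI 42. */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .spec = &spec,
                .mask = &rte_flow_item_vxlan_mask,
        };

        return mlx5_flow_validate_item_vxlan(&item,
                                             MLX5_FLOW_LAYER_OUTER_L2 |
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP,
                                             &error);
}
#endif /* MLX5_FLOW_VALIDATION_EXAMPLES */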
2378
2379 /**
2380  * Validate VXLAN_GPE item.
2381  *
2382  * @param[in] item
2383  *   Item specification.
2384  * @param[in] item_flags
2385  *   Bit-fields that hold the items detected until now.
2386  * @param[in] dev
2387  *   Pointer to the Ethernet device structure.
2390  * @param[out] error
2391  *   Pointer to error structure.
2392  *
2393  * @return
2394  *   0 on success, a negative errno value otherwise and rte_errno is set.
2395  */
2396 int
2397 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2398                                   uint64_t item_flags,
2399                                   struct rte_eth_dev *dev,
2400                                   struct rte_flow_error *error)
2401 {
2402         struct mlx5_priv *priv = dev->data->dev_private;
2403         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2404         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2405         int ret;
2406         union vni {
2407                 uint32_t vlan_id;
2408                 uint8_t vni[4];
2409         } id = { .vlan_id = 0, };
2410
2411         if (!priv->config.l3_vxlan_en)
2412                 return rte_flow_error_set(error, ENOTSUP,
2413                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2414                                           "L3 VXLAN is not enabled by device"
2415                                           " parameter and/or not configured in"
2416                                           " firmware");
2417         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2418                 return rte_flow_error_set(error, ENOTSUP,
2419                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2420                                           "multiple tunnel layers not"
2421                                           " supported");
2422         /*
2423          * Verify only UDPv4 is present as defined in
2424          * https://tools.ietf.org/html/rfc7348
2425          */
2426         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2427                 return rte_flow_error_set(error, EINVAL,
2428                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2429                                           "no outer UDP layer found");
2430         if (!mask)
2431                 mask = &rte_flow_item_vxlan_gpe_mask;
2432         ret = mlx5_flow_item_acceptable
2433                 (item, (const uint8_t *)mask,
2434                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2435                  sizeof(struct rte_flow_item_vxlan_gpe),
2436                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2437         if (ret < 0)
2438                 return ret;
2439         if (spec) {
2440                 if (spec->protocol)
2441                         return rte_flow_error_set(error, ENOTSUP,
2442                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2443                                                   item,
2444                                                   "VxLAN-GPE protocol"
2445                                                   " not supported");
2446                 memcpy(&id.vni[1], spec->vni, 3);
                     /* Keep only the VNI bits covered by the mask; the
                      * original second memcpy overwrote the spec bytes.
                      */
2447                 id.vni[1] &= mask->vni[0];
                     id.vni[2] &= mask->vni[1];
                     id.vni[3] &= mask->vni[2];
2448         }
2449         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2450                 return rte_flow_error_set(error, ENOTSUP,
2451                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2452                                           "VXLAN-GPE tunnel must be fully"
2453                                           " defined");
2454         return 0;
2455 }

2456 /**
2457  * Validate GRE Key item.
2458  *
2459  * @param[in] item
2460  *   Item specification.
2461  * @param[in] item_flags
2462  *   Bit flags to mark detected items.
2463  * @param[in] gre_item
2464  *   Pointer to the GRE item.
2465  * @param[out] error
2466  *   Pointer to error structure.
2467  *
2468  * @return
2469  *   0 on success, a negative errno value otherwise and rte_errno is set.
2470  */
2471 int
2472 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2473                                 uint64_t item_flags,
2474                                 const struct rte_flow_item *gre_item,
2475                                 struct rte_flow_error *error)
2476 {
2477         const rte_be32_t *mask = item->mask;
2478         int ret = 0;
2479         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2480         const struct rte_flow_item_gre *gre_spec;
2481         const struct rte_flow_item_gre *gre_mask;
2482
2483         if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2484                 return rte_flow_error_set(error, ENOTSUP,
2485                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2486                                           "Multiple GRE keys not supported");
2487         if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2488                 return rte_flow_error_set(error, ENOTSUP,
2489                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2490                                           "No preceding GRE header");
2491         if (item_flags & MLX5_FLOW_LAYER_INNER)
2492                 return rte_flow_error_set(error, ENOTSUP,
2493                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2494                                           "GRE key following a wrong item");
2495         gre_mask = gre_item->mask;
2496         if (!gre_mask)
2497                 gre_mask = &rte_flow_item_gre_mask;
2498         gre_spec = gre_item->spec;
2499         if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2500                          !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2501                 return rte_flow_error_set(error, EINVAL,
2502                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2503                                           "Key bit must be on");
2504
2505         if (!mask)
2506                 mask = &gre_key_default_mask;
2507         ret = mlx5_flow_item_acceptable
2508                 (item, (const uint8_t *)mask,
2509                  (const uint8_t *)&gre_key_default_mask,
2510                  sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2511         return ret;
2512 }
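
/*
 * Usage sketch (illustrative only, not part of the driver): a GRE pattern
 * carrying a GRE_KEY item. When the GRE mask covers the K bit (0x2000),
 * the spec must have that bit set, otherwise the validation above rejects
 * the key item. The key value below is made up for illustration.
 *
 *   struct rte_flow_item_gre gre_spec = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),        // K bit: key present.
 *   };
 *   struct rte_flow_item_gre gre_mask = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),        // Match the K bit.
 *   };
 *   rte_be32_t key_spec = RTE_BE32(0x1234);
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE,
 *             .spec = &gre_spec, .mask = &gre_mask },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &key_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */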
2513
2514 /**
2515  * Validate GRE item.
2516  *
2517  * @param[in] item
2518  *   Item specification.
2519  * @param[in] item_flags
2520  *   Bit flags to mark detected items.
2521  * @param[in] target_protocol
2522  *   The next protocol in the previous item.
2523  * @param[out] error
2524  *   Pointer to error structure.
2525  *
2526  * @return
2527  *   0 on success, a negative errno value otherwise and rte_errno is set.
2528  */
2529 int
2530 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2531                             uint64_t item_flags,
2532                             uint8_t target_protocol,
2533                             struct rte_flow_error *error)
2534 {
2535         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2536         const struct rte_flow_item_gre *mask = item->mask;
2537         int ret;
2538         const struct rte_flow_item_gre nic_mask = {
2539                 .c_rsvd0_ver = RTE_BE16(0xB000),
2540                 .protocol = RTE_BE16(UINT16_MAX),
2541         };
2542
2543         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2544                 return rte_flow_error_set(error, EINVAL,
2545                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2546                                           "protocol filtering not compatible"
2547                                           " with this GRE layer");
2548         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2549                 return rte_flow_error_set(error, ENOTSUP,
2550                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2551                                           "multiple tunnel layers not"
2552                                           " supported");
2553         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2556                                           "L3 Layer is missing");
2557         if (!mask)
2558                 mask = &rte_flow_item_gre_mask;
2559         ret = mlx5_flow_item_acceptable
2560                 (item, (const uint8_t *)mask,
2561                  (const uint8_t *)&nic_mask,
2562                  sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2563                  error);
2564         if (ret < 0)
2565                 return ret;
2566 #ifndef HAVE_MLX5DV_DR
2567 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2568         if (spec && (spec->protocol & mask->protocol))
2569                 return rte_flow_error_set(error, ENOTSUP,
2570                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2571                                           "without MPLS support the"
2572                                           " specification cannot be used for"
2573                                           " filtering");
2574 #endif
2575 #endif
2576         return 0;
2577 }
2578
2579 /**
2580  * Validate Geneve item.
2581  *
2582  * @param[in] item
2583  *   Item specification.
2584  * @param[in] item_flags
2585  *   Bit-fields that hold the items detected until now.
2586  * @param[in] dev
2587  *   Pointer to the rte_eth_dev structure.
2588  * @param[out] error
2589  *   Pointer to error structure.
2590  *
2591  * @return
2592  *   0 on success, a negative errno value otherwise and rte_errno is set.
2593  */
2594
2595 int
2596 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2597                                uint64_t item_flags,
2598                                struct rte_eth_dev *dev,
2599                                struct rte_flow_error *error)
2600 {
2601         struct mlx5_priv *priv = dev->data->dev_private;
2602         const struct rte_flow_item_geneve *spec = item->spec;
2603         const struct rte_flow_item_geneve *mask = item->mask;
2604         int ret;
2605         uint16_t gbhdr;
2606         uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2607                           MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2608         const struct rte_flow_item_geneve nic_mask = {
2609                 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2610                 .vni = "\xff\xff\xff",
2611                 .protocol = RTE_BE16(UINT16_MAX),
2612         };
2613
2614         if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2615                 return rte_flow_error_set(error, ENOTSUP,
2616                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2617                                           "L3 Geneve is not enabled by device"
2618                                           " parameter and/or not configured in"
2619                                           " firmware");
2620         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2621                 return rte_flow_error_set(error, ENOTSUP,
2622                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2623                                           "multiple tunnel layers not"
2624                                           " supported");
2625         /*
2626          * Verify an outer UDP header is present, as defined in
2627          * the Geneve specification (RFC 8926).
2628          */
2629         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2630                 return rte_flow_error_set(error, EINVAL,
2631                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2632                                           "no outer UDP layer found");
2633         if (!mask)
2634                 mask = &rte_flow_item_geneve_mask;
2635         ret = mlx5_flow_item_acceptable
2636                                   (item, (const uint8_t *)mask,
2637                                    (const uint8_t *)&nic_mask,
2638                                    sizeof(struct rte_flow_item_geneve),
2639                                    MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2640         if (ret)
2641                 return ret;
2642         if (spec) {
2643                 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2644                 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2645                      MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2646                      MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2647                         return rte_flow_error_set(error, ENOTSUP,
2648                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2649                                                   item,
2650                                                   "Geneve protocol unsupported"
2651                                                   " fields are being used");
2652                 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2653                         return rte_flow_error_set
2654                                         (error, ENOTSUP,
2655                                          RTE_FLOW_ERROR_TYPE_ITEM,
2656                                          item,
2657                                          "Unsupported Geneve options length");
2658         }
2659         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2660                 return rte_flow_error_set
2661                                     (error, ENOTSUP,
2662                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
2663                                      "Geneve tunnel must be fully defined");
2664         return 0;
2665 }
2666
2667 /**
2668  * Validate Geneve TLV option item.
2669  *
2670  * @param[in] item
2671  *   Item specification.
2672  * @param[in] last_item
2673  *   Previous validated item in the pattern items.
2674  * @param[in] geneve_item
2675  *   Previous GENEVE item specification.
2676  * @param[in] dev
2677  *   Pointer to the rte_eth_dev structure.
2678  * @param[out] error
2679  *   Pointer to error structure.
2680  *
2681  * @return
2682  *   0 on success, a negative errno value otherwise and rte_errno is set.
2683  */
2684 int
2685 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
2686                                    uint64_t last_item,
2687                                    const struct rte_flow_item *geneve_item,
2688                                    struct rte_eth_dev *dev,
2689                                    struct rte_flow_error *error)
2690 {
2691         struct mlx5_priv *priv = dev->data->dev_private;
2692         struct mlx5_dev_ctx_shared *sh = priv->sh;
2693         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
2694         struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
2695         /* Option data lengths below are in 4-byte (dword) units. */
2696         uint8_t data_max_supported =
                             hca_attr->max_geneve_tlv_option_data_len;
2697         struct mlx5_dev_config *config = &priv->config;
2698         const struct rte_flow_item_geneve *geneve_spec;
2699         const struct rte_flow_item_geneve *geneve_mask;
2700         const struct rte_flow_item_geneve_opt *spec = item->spec;
2701         const struct rte_flow_item_geneve_opt *mask = item->mask;
2702         unsigned int i;
2703         unsigned int data_len;
2704         uint8_t tlv_option_len;
2705         uint16_t optlen_m, optlen_v;
2706         const struct rte_flow_item_geneve_opt full_mask = {
2707                 .option_class = RTE_BE16(0xffff),
2708                 .option_type = 0xff,
2709                 .option_len = 0x1f,
2710         };
2711
2712         if (!mask)
2713                 mask = &rte_flow_item_geneve_opt_mask;
2714         if (!spec)
2715                 return rte_flow_error_set
2716                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2717                         "Geneve TLV opt class/type/length must be specified");
2718         if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
2719                 return rte_flow_error_set
2720                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2721                         "Geneve TLV opt length exceeds the limit (31)");
2722         /* Check if class, type and length masks are full. */
2723         if (full_mask.option_class != mask->option_class ||
2724             full_mask.option_type != mask->option_type ||
2725             full_mask.option_len != (mask->option_len & full_mask.option_len))
2726                 return rte_flow_error_set
2727                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2728                         "Geneve TLV opt class/type/length masks must be full");
2729         /* Check if length is supported */
2730         if ((uint32_t)spec->option_len >
2731                         config->hca_attr.max_geneve_tlv_option_data_len)
2732                 return rte_flow_error_set
2733                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2734                         "Geneve TLV opt length not supported");
2735         if (config->hca_attr.max_geneve_tlv_options > 1)
2736                 DRV_LOG(DEBUG,
2737                         "the device supports more than one GENEVE TLV option");
2738         /* Check GENEVE item preceding. */
2739         if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
2740                 return rte_flow_error_set
2741                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2742                         "Geneve opt item must be preceded by a Geneve item");
2743         geneve_spec = geneve_item->spec;
2744         geneve_mask = geneve_item->mask ? geneve_item->mask :
2745                                           &rte_flow_item_geneve_mask;
2746         /* Check that the TLV option header and data fit into the GENEVE options length. */
2747         if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
2748                             geneve_spec->ver_opt_len_o_c_rsvd0)) {
2749                 tlv_option_len = spec->option_len & mask->option_len;
2750                 optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
2751                 optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
2752                 optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
2753                 optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
2754                 if ((optlen_v & optlen_m) <= tlv_option_len)
2755                         return rte_flow_error_set
2756                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2757                                  "GENEVE TLV option length exceeds optlen");
2758         }
2759         /* Check if length is 0 or data is 0. */
2760         if (spec->data == NULL || spec->option_len == 0)
2761                 return rte_flow_error_set
2762                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2763                         "Geneve TLV opt with zero data/length not supported");
2764         /* Check not all data & mask are 0. */
2765         data_len = spec->option_len; /* data[] holds option_len dwords. */
2766         if (mask->data == NULL) {
2767                 for (i = 0; i < data_len; i++)
2768                         if (spec->data[i])
2769                                 break;
2770                 if (i == data_len)
2771                         return rte_flow_error_set(error, ENOTSUP,
2772                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2773                                 "Can't match on Geneve option data 0");
2774         } else {
2775                 for (i = 0; i < data_len; i++)
2776                         if (spec->data[i] & mask->data[i])
2777                                 break;
2778                 if (i == data_len)
2779                         return rte_flow_error_set(error, ENOTSUP,
2780                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2781                                 "Can't match on Geneve option data and mask 0");
2782                 /* Check data mask supported. */
2783                 for (i = data_max_supported; i < data_len; i++)
2784                         if (mask->data[i])
2785                                 return rte_flow_error_set(error, ENOTSUP,
2786                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2787                                         "Data mask is of unsupported size");
2788         }
2789         /* Check GENEVE option is supported in NIC. */
2790         if (!config->hca_attr.geneve_tlv_opt)
2791                 return rte_flow_error_set
2792                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2793                         "Geneve TLV opt not supported");
2794         /* Check if we already have geneve option with different type/class. */
2795         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
2796         geneve_opt_resource = sh->geneve_tlv_option_resource;
2797         if (geneve_opt_resource != NULL)
2798                 if (geneve_opt_resource->option_class != spec->option_class ||
2799                     geneve_opt_resource->option_type != spec->option_type ||
2800                     geneve_opt_resource->length != spec->option_len) {
2801                         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2802                         return rte_flow_error_set(error, ENOTSUP,
2803                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2804                                 "Only one Geneve TLV option supported");
2805                 }
2806         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2807         return 0;
2808 }
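
/*
 * Usage sketch (illustrative only, not part of the driver): matching a
 * single GENEVE TLV option. Per the validation above, the class/type/length
 * masks must be full, the option data is given in 4-byte words, and the
 * GENEVE_OPT item must follow a GENEVE item. The class/type/data values
 * below are made up for illustration.
 *
 *   rte_be32_t opt_data = RTE_BE32(0xdeadbeef);
 *   rte_be32_t opt_data_mask = RTE_BE32(0xffffffff);
 *   struct rte_flow_item_geneve_opt opt_spec = {
 *           .option_class = RTE_BE16(0x0102),
 *           .option_type = 0x42,
 *           .option_len = 1,                // One 4-byte data word.
 *           .data = &opt_data,
 *   };
 *   struct rte_flow_item_geneve_opt opt_mask = {
 *           .option_class = RTE_BE16(0xffff),
 *           .option_type = 0xff,
 *           .option_len = 0x1f,
 *           .data = &opt_data_mask,
 *   };
 */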
2809
2810 /**
2811  * Validate MPLS item.
2812  *
2813  * @param[in] dev
2814  *   Pointer to the rte_eth_dev structure.
2815  * @param[in] item
2816  *   Item specification.
2817  * @param[in] item_flags
2818  *   Bit-fields that hold the items detected until now.
2819  * @param[in] prev_layer
2820  *   The protocol layer indicated in previous item.
2821  * @param[out] error
2822  *   Pointer to error structure.
2823  *
2824  * @return
2825  *   0 on success, a negative errno value otherwise and rte_errno is set.
2826  */
2827 int
2828 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2829                              const struct rte_flow_item *item __rte_unused,
2830                              uint64_t item_flags __rte_unused,
2831                              uint64_t prev_layer __rte_unused,
2832                              struct rte_flow_error *error)
2833 {
2834 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2835         const struct rte_flow_item_mpls *mask = item->mask;
2836         struct mlx5_priv *priv = dev->data->dev_private;
2837         int ret;
2838
2839         if (!priv->config.mpls_en)
2840                 return rte_flow_error_set(error, ENOTSUP,
2841                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2842                                           "MPLS not supported or"
2843                                           " disabled in firmware"
2844                                           " configuration.");
2845         /* MPLS over IP, UDP, GRE is allowed */
2846         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2847                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
2848                             MLX5_FLOW_LAYER_GRE |
2849                             MLX5_FLOW_LAYER_GRE_KEY)))
2850                 return rte_flow_error_set(error, EINVAL,
2851                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2852                                           "protocol filtering not compatible"
2853                                           " with MPLS layer");
2854         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2855         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2856             !(item_flags & MLX5_FLOW_LAYER_GRE))
2857                 return rte_flow_error_set(error, ENOTSUP,
2858                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2859                                           "multiple tunnel layers not"
2860                                           " supported");
2861         if (!mask)
2862                 mask = &rte_flow_item_mpls_mask;
2863         ret = mlx5_flow_item_acceptable
2864                 (item, (const uint8_t *)mask,
2865                  (const uint8_t *)&rte_flow_item_mpls_mask,
2866                  sizeof(struct rte_flow_item_mpls),
2867                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2868         if (ret < 0)
2869                 return ret;
2870         return 0;
2871 #else
2872         return rte_flow_error_set(error, ENOTSUP,
2873                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
2874                                   "MPLS is not supported by Verbs, please"
2875                                   " update rdma-core.");
2876 #endif
2877 }
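
/*
 * Usage sketch (illustrative only, not part of the driver): MPLS over GRE,
 * one of the encapsulations accepted above (MPLS may follow outer L3, UDP
 * or GRE). The label value is made up for illustration.
 *
 *   struct rte_flow_item_mpls mpls_spec = {
 *           .label_tc_s = { 0x00, 0x01, 0x01 },     // Label 16, S bit set.
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE },
 *           { .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */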
2878
2879 /**
2880  * Validate NVGRE item.
2881  *
2882  * @param[in] item
2883  *   Item specification.
2884  * @param[in] item_flags
2885  *   Bit flags to mark detected items.
2886  * @param[in] target_protocol
2887  *   The next protocol in the previous item.
2888  * @param[out] error
2889  *   Pointer to error structure.
2890  *
2891  * @return
2892  *   0 on success, a negative errno value otherwise and rte_errno is set.
2893  */
2894 int
2895 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2896                               uint64_t item_flags,
2897                               uint8_t target_protocol,
2898                               struct rte_flow_error *error)
2899 {
2900         const struct rte_flow_item_nvgre *mask = item->mask;
2901         int ret;
2902
2903         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2904                 return rte_flow_error_set(error, EINVAL,
2905                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2906                                           "protocol filtering not compatible"
2907                                           " with this GRE layer");
2908         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2909                 return rte_flow_error_set(error, ENOTSUP,
2910                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2911                                           "multiple tunnel layers not"
2912                                           " supported");
2913         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2914                 return rte_flow_error_set(error, ENOTSUP,
2915                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2916                                           "L3 Layer is missing");
2917         if (!mask)
2918                 mask = &rte_flow_item_nvgre_mask;
2919         ret = mlx5_flow_item_acceptable
2920                 (item, (const uint8_t *)mask,
2921                  (const uint8_t *)&rte_flow_item_nvgre_mask,
2922                  sizeof(struct rte_flow_item_nvgre),
2923                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2924         if (ret < 0)
2925                 return ret;
2926         return 0;
2927 }
2928
2929 /**
2930  * Validate eCPRI item.
2931  *
2932  * @param[in] item
2933  *   Item specification.
2934  * @param[in] item_flags
2935  *   Bit-fields that hold the items detected until now.
2936  * @param[in] last_item
2937  *   Previous validated item in the pattern items.
2938  * @param[in] ether_type
2939  *   Type in the ethernet layer header (including dot1q).
2940  * @param[in] acc_mask
2941  *   Acceptable mask; if NULL, the default internal mask
2942  *   will be used to check whether item fields are supported.
2943  * @param[out] error
2944  *   Pointer to error structure.
2945  *
2946  * @return
2947  *   0 on success, a negative errno value otherwise and rte_errno is set.
2948  */
2949 int
2950 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2951                               uint64_t item_flags,
2952                               uint64_t last_item,
2953                               uint16_t ether_type,
2954                               const struct rte_flow_item_ecpri *acc_mask,
2955                               struct rte_flow_error *error)
2956 {
2957         const struct rte_flow_item_ecpri *mask = item->mask;
2958         const struct rte_flow_item_ecpri nic_mask = {
2959                 .hdr = {
2960                         .common = {
2961                                 .u32 =
2962                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
2963                                         .type = 0xFF,
2964                                         }).u32),
2965                         },
2966                         .dummy[0] = 0xFFFFFFFF,
2967                 },
2968         };
2969         const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2970                                         MLX5_FLOW_LAYER_OUTER_VLAN);
2971         struct rte_flow_item_ecpri mask_lo;
2972
2973         if (!(last_item & outer_l2_vlan) &&
2974             last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
2975                 return rte_flow_error_set(error, EINVAL,
2976                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2977                                           "eCPRI can only follow L2/VLAN layer or UDP layer");
2978         if ((last_item & outer_l2_vlan) && ether_type &&
2979             ether_type != RTE_ETHER_TYPE_ECPRI)
2980                 return rte_flow_error_set(error, EINVAL,
2981                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2982                                           "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
2983         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2984                 return rte_flow_error_set(error, EINVAL,
2985                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2986                                           "eCPRI with tunnel is not supported right now");
2987         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2988                 return rte_flow_error_set(error, ENOTSUP,
2989                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2990                                           "multiple L3 layers not supported");
2991         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2992                 return rte_flow_error_set(error, EINVAL,
2993                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2994                                           "eCPRI cannot coexist with a TCP layer");
2995         /* In specification, eCPRI could be over UDP layer. */
2996         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2997                 return rte_flow_error_set(error, EINVAL,
2998                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2999                                           "eCPRI over UDP layer is not yet supported");
3000         /* Mask for type field in common header could be zero. */
3001         if (!mask)
3002                 mask = &rte_flow_item_ecpri_mask;
3003         /* The input mask is in big-endian format. */
3004         mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
3005         if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
3006                 return rte_flow_error_set(error, EINVAL,
3007                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3008                                           "partial mask is not supported for protocol");
3009         else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
3010                 return rte_flow_error_set(error, EINVAL,
3011                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
3012                                           "message header mask must be after a type mask");
3013         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
3014                                          acc_mask ? (const uint8_t *)acc_mask
3015                                                   : (const uint8_t *)&nic_mask,
3016                                          sizeof(struct rte_flow_item_ecpri),
3017                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
3018 }
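
/*
 * Usage sketch (illustrative only, not part of the driver): eCPRI over
 * Ethernet. Per the validation above, the preceding L2 item must either
 * leave the ether type unspecified or set it to 0xAEFE. This assumes the
 * RTE_ETHER_TYPE_ECPRI and RTE_ECPRI_MSG_TYPE_IQ_DATA definitions from
 * rte_ether.h and rte_ecpri.h.
 *
 *   struct rte_flow_item_eth l2_spec = {
 *           .type = RTE_BE16(RTE_ETHER_TYPE_ECPRI), // 0xAEFE.
 *   };
 *   struct rte_flow_item_ecpri ecpri_spec = {
 *           .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &l2_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_ECPRI, .spec = &ecpri_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */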
3019
3020 /**
3021  * Release resources related to the QUEUE/RSS action split.
3022  *
3023  * @param dev
3024  *   Pointer to Ethernet device.
3025  * @param flow
3026  *   Flow to release IDs from.
3027  */
3028 static void
3029 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
3030                              struct rte_flow *flow)
3031 {
3032         struct mlx5_priv *priv = dev->data->dev_private;
3033         uint32_t handle_idx;
3034         struct mlx5_flow_handle *dev_handle;
3035
3036         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
3037                        handle_idx, dev_handle, next)
3038                 if (dev_handle->split_flow_id &&
3039                     !dev_handle->is_meter_flow_id)
3040                         mlx5_ipool_free(priv->sh->ipool
3041                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
3042                                         dev_handle->split_flow_id);
3043 }
3044
3045 static int
3046 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
3047                    const struct rte_flow_attr *attr __rte_unused,
3048                    const struct rte_flow_item items[] __rte_unused,
3049                    const struct rte_flow_action actions[] __rte_unused,
3050                    bool external __rte_unused,
3051                    int hairpin __rte_unused,
3052                    struct rte_flow_error *error)
3053 {
3054         return rte_flow_error_set(error, ENOTSUP,
3055                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3056 }
3057
3058 static struct mlx5_flow *
3059 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
3060                   const struct rte_flow_attr *attr __rte_unused,
3061                   const struct rte_flow_item items[] __rte_unused,
3062                   const struct rte_flow_action actions[] __rte_unused,
3063                   struct rte_flow_error *error)
3064 {
3065         rte_flow_error_set(error, ENOTSUP,
3066                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3067         return NULL;
3068 }
3069
3070 static int
3071 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
3072                     struct mlx5_flow *dev_flow __rte_unused,
3073                     const struct rte_flow_attr *attr __rte_unused,
3074                     const struct rte_flow_item items[] __rte_unused,
3075                     const struct rte_flow_action actions[] __rte_unused,
3076                     struct rte_flow_error *error)
3077 {
3078         return rte_flow_error_set(error, ENOTSUP,
3079                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3080 }
3081
3082 static int
3083 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3084                 struct rte_flow *flow __rte_unused,
3085                 struct rte_flow_error *error)
3086 {
3087         return rte_flow_error_set(error, ENOTSUP,
3088                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3089 }
3090
3091 static void
3092 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3093                  struct rte_flow *flow __rte_unused)
3094 {
3095 }
3096
3097 static void
3098 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3099                   struct rte_flow *flow __rte_unused)
3100 {
3101 }
3102
3103 static int
3104 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3105                 struct rte_flow *flow __rte_unused,
3106                 const struct rte_flow_action *actions __rte_unused,
3107                 void *data __rte_unused,
3108                 struct rte_flow_error *error)
3109 {
3110         return rte_flow_error_set(error, ENOTSUP,
3111                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3112 }
3113
3114 static int
3115 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3116                       uint32_t domains __rte_unused,
3117                       uint32_t flags __rte_unused)
3118 {
3119         return 0;
3120 }
3121
3122 /* Void driver to protect from null pointer dereference. */
3123 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3124         .validate = flow_null_validate,
3125         .prepare = flow_null_prepare,
3126         .translate = flow_null_translate,
3127         .apply = flow_null_apply,
3128         .remove = flow_null_remove,
3129         .destroy = flow_null_destroy,
3130         .query = flow_null_query,
3131         .sync_domain = flow_null_sync_domain,
3132 };
3133
3134 /**
3135  * Select flow driver type according to flow attributes and device
3136  * configuration.
3137  *
3138  * @param[in] dev
3139  *   Pointer to the dev structure.
3140  * @param[in] attr
3141  *   Pointer to the flow attributes.
3142  *
3143  * @return
3144  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3145  */
3146 static enum mlx5_flow_drv_type
3147 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3148 {
3149         struct mlx5_priv *priv = dev->data->dev_private;
3150         /* The OS can determine first a specific flow type (DV, VERBS) */
3151         enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3152
3153         if (type != MLX5_FLOW_TYPE_MAX)
3154                 return type;
3155         /* If no OS specific type - continue with DV/VERBS selection */
3156         if (attr->transfer && priv->config.dv_esw_en)
3157                 type = MLX5_FLOW_TYPE_DV;
3158         if (!attr->transfer)
3159                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3160                                                  MLX5_FLOW_TYPE_VERBS;
3161         return type;
3162 }
3163
3164 #define flow_get_drv_ops(type) flow_drv_ops[type]
3165
3166 /**
3167  * Flow driver validation API. This abstracts calling driver specific functions.
3168  * The type of flow driver is determined according to flow attributes.
3169  *
3170  * @param[in] dev
3171  *   Pointer to the dev structure.
3172  * @param[in] attr
3173  *   Pointer to the flow attributes.
3174  * @param[in] items
3175  *   Pointer to the list of items.
3176  * @param[in] actions
3177  *   Pointer to the list of actions.
3178  * @param[in] external
3179  *   This flow rule is created by a request external to the PMD.
3180  * @param[in] hairpin
3181  *   Number of hairpin TX actions, 0 means classic flow.
3182  * @param[out] error
3183  *   Pointer to the error structure.
3184  *
3185  * @return
3186  *   0 on success, a negative errno value otherwise and rte_errno is set.
3187  */
3188 static inline int
3189 flow_drv_validate(struct rte_eth_dev *dev,
3190                   const struct rte_flow_attr *attr,
3191                   const struct rte_flow_item items[],
3192                   const struct rte_flow_action actions[],
3193                   bool external, int hairpin, struct rte_flow_error *error)
3194 {
3195         const struct mlx5_flow_driver_ops *fops;
3196         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3197
3198         fops = flow_get_drv_ops(type);
3199         return fops->validate(dev, attr, items, actions, external,
3200                               hairpin, error);
3201 }
3202
3203 /**
3204  * Flow driver preparation API. This abstracts calling driver specific
3205  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3206  * calculates the size of memory required for device flow, allocates the memory,
3207  * initializes the device flow and returns the pointer.
3208  *
3209  * @note
3210  *   This function initializes the device flow structure, such as dv or verbs
3211  *   in struct mlx5_flow. However, it is the caller's responsibility to
3212  *   initialize the rest; e.g. adding the returned device flow to the
3213  *   flow->dev_flow list and setting the backward reference to the flow must
3214  *   be done outside of this function. The layers field is not filled either.
3215  *
3216  * @param[in] dev
3217  *   Pointer to the dev structure.
3218  * @param[in] attr
3219  *   Pointer to the flow attributes.
3220  * @param[in] items
3221  *   Pointer to the list of items.
3222  * @param[in] actions
3223  *   Pointer to the list of actions.
3224  * @param[in] flow_idx
3225  *   The memory pool index of the flow.
3226  * @param[out] error
3227  *   Pointer to the error structure.
3228  *
3229  * @return
3230  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3231  */
3232 static inline struct mlx5_flow *
3233 flow_drv_prepare(struct rte_eth_dev *dev,
3234                  const struct rte_flow *flow,
3235                  const struct rte_flow_attr *attr,
3236                  const struct rte_flow_item items[],
3237                  const struct rte_flow_action actions[],
3238                  uint32_t flow_idx,
3239                  struct rte_flow_error *error)
3240 {
3241         const struct mlx5_flow_driver_ops *fops;
3242         enum mlx5_flow_drv_type type = flow->drv_type;
3243         struct mlx5_flow *mlx5_flow = NULL;
3244
3245         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3246         fops = flow_get_drv_ops(type);
3247         mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3248         if (mlx5_flow)
3249                 mlx5_flow->flow_idx = flow_idx;
3250         return mlx5_flow;
3251 }
3252
3253 /**
3254  * Flow driver translation API. This abstracts calling driver specific
3255  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3256  * translates a generic flow into a driver flow. flow_drv_prepare() must
3257  * precede.
3258  *
3259  * @note
3260  *   dev_flow->layers could be filled as a result of parsing during translation
3261  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3262  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3263  *   flow->actions could be overwritten even though all the expanded dev_flows
3264  *   have the same actions.
3265  *
3266  * @param[in] dev
3267  *   Pointer to the rte dev structure.
3268  * @param[in, out] dev_flow
3269  *   Pointer to the mlx5 flow.
3270  * @param[in] attr
3271  *   Pointer to the flow attributes.
3272  * @param[in] items
3273  *   Pointer to the list of items.
3274  * @param[in] actions
3275  *   Pointer to the list of actions.
3276  * @param[out] error
3277  *   Pointer to the error structure.
3278  *
3279  * @return
3280  *   0 on success, a negative errno value otherwise and rte_errno is set.
3281  */
3282 static inline int
3283 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3284                    const struct rte_flow_attr *attr,
3285                    const struct rte_flow_item items[],
3286                    const struct rte_flow_action actions[],
3287                    struct rte_flow_error *error)
3288 {
3289         const struct mlx5_flow_driver_ops *fops;
3290         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3291
3292         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3293         fops = flow_get_drv_ops(type);
3294         return fops->translate(dev, dev_flow, attr, items, actions, error);
3295 }
3296
3297 /**
3298  * Flow driver apply API. This abstracts calling driver specific functions.
3299  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3300  * translated driver flows on to device. flow_drv_translate() must precede.
3301  *
3302  * @param[in] dev
3303  *   Pointer to Ethernet device structure.
3304  * @param[in, out] flow
3305  *   Pointer to flow structure.
3306  * @param[out] error
3307  *   Pointer to error structure.
3308  *
3309  * @return
3310  *   0 on success, a negative errno value otherwise and rte_errno is set.
3311  */
3312 static inline int
3313 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3314                struct rte_flow_error *error)
3315 {
3316         const struct mlx5_flow_driver_ops *fops;
3317         enum mlx5_flow_drv_type type = flow->drv_type;
3318
3319         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3320         fops = flow_get_drv_ops(type);
3321         return fops->apply(dev, flow, error);
3322 }
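
/*
 * Sketch of how the driver-op wrappers above chain together on flow
 * creation (simplified; error handling and flow object setup omitted):
 *
 *   flow->drv_type = flow_get_drv_type(dev, attr);
 *   if (flow_drv_validate(dev, attr, items, actions, true, 0, error))
 *           goto fail;
 *   dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, idx, error);
 *   if (!dev_flow ||
 *       flow_drv_translate(dev, dev_flow, attr, items, actions, error) ||
 *       flow_drv_apply(dev, flow, error))
 *           goto fail;
 */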
3323
3324 /**
3325  * Flow driver destroy API. This abstracts calling driver specific functions.
3326  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3327  * on device and releases resources of the flow.
3328  *
3329  * @param[in] dev
3330  *   Pointer to Ethernet device.
3331  * @param[in, out] flow
3332  *   Pointer to flow structure.
3333  */
3334 static inline void
3335 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3336 {
3337         const struct mlx5_flow_driver_ops *fops;
3338         enum mlx5_flow_drv_type type = flow->drv_type;
3339
3340         flow_mreg_split_qrss_release(dev, flow);
3341         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3342         fops = flow_get_drv_ops(type);
3343         fops->destroy(dev, flow);
3344 }
3345
3346 /**
3347  * Get RSS action from the action list.
3348  *
3349  * @param[in] actions
3350  *   Pointer to the list of actions.
3351  *
3352  * @return
3353  *   Pointer to the RSS action if it exists, otherwise NULL.
3354  */
3355 static const struct rte_flow_action_rss*
3356 flow_get_rss_action(const struct rte_flow_action actions[])
3357 {
3358         const struct rte_flow_action_rss *rss = NULL;
3359
3360         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3361                 switch (actions->type) {
3362                 case RTE_FLOW_ACTION_TYPE_RSS:
3363                         rss = actions->conf;
3364                         break;
3365                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
3366                 {
3367                         const struct rte_flow_action_sample *sample =
3368                                                                 actions->conf;
3369                         const struct rte_flow_action *act = sample->actions;
3370                         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
3371                                 if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
3372                                         rss = act->conf;
3373                         break;
3374                 }
3375                 default:
3376                         break;
3377                 }
3378         }
3379         return rss;
3380 }
3381
3382 /**
3383  * Get ASO age action by index.
3384  *
3385  * @param[in] dev
3386  *   Pointer to the Ethernet device structure.
3387  * @param[in] age_idx
3388  *   Index to the ASO age action.
3389  *
3390  * @return
3391  *   The specified ASO age action.
3392  */
3393 struct mlx5_aso_age_action*
3394 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
3395 {
3396         uint16_t pool_idx = age_idx & UINT16_MAX;
3397         uint16_t offset = (age_idx >> 16) & UINT16_MAX;
3398         struct mlx5_priv *priv = dev->data->dev_private;
3399         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
3400         struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
3401
3402         return &pool->actions[offset - 1];
3403 }
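
/*
 * Worked example of the age_idx encoding used above: the pool index lives
 * in the lower 16 bits and the 1-based action offset in the upper 16 bits.
 * For age_idx = 0x00030002: pool_idx = 2, offset = 3, so the returned
 * action is &mng->pools[2]->actions[2].
 */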
3404
3405 /* Maps an indirect action to its translated direct action in an actions array. */
3406 struct mlx5_translated_action_handle {
3407         struct rte_flow_action_handle *action; /**< Indirect action handle. */
3408         int index; /**< Index in related array of rte_flow_action. */
3409 };
3410
3411 /**
3412  * Translates actions of type RTE_FLOW_ACTION_TYPE_INDIRECT to the related
3413  * direct actions if translation is possible.
3414  * This functionality is used to run the same execution path for both direct
3415  * and indirect actions on flow create. All necessary preparations for
3416  * indirect action handling should be performed on the *handle* actions list
3417  * returned from this call.
3418  *
3419  * @param[in] dev
3420  *   Pointer to Ethernet device.
3421  * @param[in] actions
3422  *   List of actions to translate.
3423  * @param[out] handle
3424  *   List to store translated indirect action object handles.
3425  * @param[in, out] indir_n
3426  *   Size of the *handle* array. On return it is updated with the number of
3427  *   indirect actions retrieved from the *actions* list.
3428  * @param[out] translated_actions
3429  *   List of actions where all indirect actions were translated to direct
3430  *   if possible. NULL if no translation took place.
3431  * @param[out] error
3432  *   Pointer to the error structure.
3433  *
3434  * @return
3435  *   0 on success, a negative errno value otherwise and rte_errno is set.
3436  */
3437 static int
3438 flow_action_handles_translate(struct rte_eth_dev *dev,
3439                               const struct rte_flow_action actions[],
3440                               struct mlx5_translated_action_handle *handle,
3441                               int *indir_n,
3442                               struct rte_flow_action **translated_actions,
3443                               struct rte_flow_error *error)
3444 {
3445         struct mlx5_priv *priv = dev->data->dev_private;
3446         struct rte_flow_action *translated = NULL;
3447         size_t actions_size;
3448         int n;
3449         int copied_n = 0;
3450         struct mlx5_translated_action_handle *handle_end = NULL;
3451
3452         for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3453                 if (actions[n].type != RTE_FLOW_ACTION_TYPE_INDIRECT)
3454                         continue;
3455                 if (copied_n == *indir_n) {
3456                         return rte_flow_error_set
3457                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3458                                  NULL, "too many shared actions");
3459                 }
3460                 rte_memcpy(&handle[copied_n].action, &actions[n].conf,
3461                            sizeof(actions[n].conf));
3462                 handle[copied_n].index = n;
3463                 copied_n++;
3464         }
3465         n++;
3466         *indir_n = copied_n;
3467         if (!copied_n)
3468                 return 0;
3469         actions_size = sizeof(struct rte_flow_action) * n;
3470         translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3471         if (!translated) {
3472                 rte_errno = ENOMEM;
3473                 return -ENOMEM;
3474         }
3475         memcpy(translated, actions, actions_size);
3476         for (handle_end = handle + copied_n; handle < handle_end; handle++) {
3477                 struct mlx5_shared_action_rss *shared_rss;
3478                 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
3479                 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3480                 uint32_t idx = act_idx &
3481                                ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3482
3483                 switch (type) {
3484                 case MLX5_INDIRECT_ACTION_TYPE_RSS:
3485                         shared_rss = mlx5_ipool_get
3486                           (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3487                         translated[handle->index].type =
3488                                 RTE_FLOW_ACTION_TYPE_RSS;
3489                         translated[handle->index].conf =
3490                                 &shared_rss->origin;
3491                         break;
3492                 case MLX5_INDIRECT_ACTION_TYPE_AGE:
3493                         if (priv->sh->flow_hit_aso_en) {
3494                                 translated[handle->index].type =
3495                                         (enum rte_flow_action_type)
3496                                         MLX5_RTE_FLOW_ACTION_TYPE_AGE;
3497                                 translated[handle->index].conf =
3498                                                          (void *)(uintptr_t)idx;
3499                                 break;
3500                         }
3501                         /* Fall-through */
3502                 default:
3503                         mlx5_free(translated);
3504                         return rte_flow_error_set
3505                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3506                                  NULL, "invalid indirect action type");
3507                 }
3508         }
3509         *translated_actions = translated;
3510         return 0;
3511 }
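
/*
 * Illustrative example (not part of the driver) of the translation above:
 * given an application actions array
 *
 *   [0] RTE_FLOW_ACTION_TYPE_INDIRECT (conf = an RSS action handle)
 *   [1] RTE_FLOW_ACTION_TYPE_END
 *
 * the returned *translated_actions copy becomes
 *
 *   [0] RTE_FLOW_ACTION_TYPE_RSS (conf = &shared_rss->origin)
 *   [1] RTE_FLOW_ACTION_TYPE_END
 *
 * while handle[0] records the original handle and its index (0) so the
 * caller can still perform the indirect-action bookkeeping.
 */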
3512
3513 /**
3514  * Get Shared RSS action from the action list.
3515  *
3516  * @param[in] dev
3517  *   Pointer to Ethernet device.
3518  * @param[in] shared
3519  *   Pointer to the list of actions.
3520  * @param[in] shared_n
3521  *   Actions list length.
3522  *
3523  * @return
3524  *   The MLX5 RSS action ID if it exists, otherwise 0.
3525  */
3526 static uint32_t
3527 flow_get_shared_rss_action(struct rte_eth_dev *dev,
3528                            struct mlx5_translated_action_handle *handle,
3529                            int shared_n)
3530 {
3531         struct mlx5_translated_action_handle *handle_end;
3532         struct mlx5_priv *priv = dev->data->dev_private;
3533         struct mlx5_shared_action_rss *shared_rss;
3534
3536         for (handle_end = handle + shared_n; handle < handle_end; handle++) {
3537                 uint32_t act_idx = (uint32_t)(uintptr_t)handle->action;
3538                 uint32_t type = act_idx >> MLX5_INDIRECT_ACTION_TYPE_OFFSET;
3539                 uint32_t idx = act_idx &
3540                                ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1);
3541                 switch (type) {
3542                 case MLX5_INDIRECT_ACTION_TYPE_RSS:
3543                         shared_rss = mlx5_ipool_get
3544                                 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
3545                                                                            idx);
3546                         __atomic_add_fetch(&shared_rss->refcnt, 1,
3547                                            __ATOMIC_RELAXED);
3548                         return idx;
3549                 default:
3550                         break;
3551                 }
3552         }
3553         return 0;
3554 }
3555
3556 static unsigned int
3557 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3558 {
3559         const struct rte_flow_item *item;
3560         unsigned int has_vlan = 0;
3561
3562         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3563                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3564                         has_vlan = 1;
3565                         break;
3566                 }
3567         }
3568         if (has_vlan)
3569                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3570                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3571         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3572                                MLX5_EXPANSION_ROOT_OUTER;
3573 }
3574
3575 /**
3576  *  Get layer flags from the prefix flow.
3577  *
3578  *  Some flows may be split into several subflows; the prefix subflow gets
3579  *  the match items and the suffix subflow gets the actions.
3580  *  Some actions need the user-defined match item flags to get the details
3581  *  of the action.
3582  *  This function helps the suffix flow to get the item layer flags from the
3583  *  prefix subflow.
3584  *
3585  * @param[in] dev_flow
3586  *   Pointer to the created prefix subflow.
3587  *
3588  * @return
3589  *   The layers obtained from the prefix subflow.
3590  */
3591 static inline uint64_t
3592 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3593 {
3594         uint64_t layers = 0;
3595
3596         /*
3597          * The layers bits could be cached in a local variable, but usually
3598          * the compiler optimizes such accesses well enough.
3599          * If there is no decap action, use the layers directly.
3600          */
3601         if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3602                 return dev_flow->handle->layers;
3603         /* Convert L3 layers with decap action. */
3604         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3605                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3606         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3607                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3608         /* Convert L4 layers with decap action.  */
3609         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3610                 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3611         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3612                 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3613         return layers;
3614 }
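
/*
 * Example (illustrative): a prefix subflow that matched
 * ETH/IPV4/UDP/VXLAN/ETH/IPV4/TCP with a decap action has
 * handle->layers containing MLX5_FLOW_LAYER_INNER_L3_IPV4 |
 * MLX5_FLOW_LAYER_INNER_L4_TCP; the suffix subflow receives
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L4_TCP
 * instead, since after decap the inner headers become the outer ones.
 */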
3615
3616 /**
3617  * Get metadata split action information.
3618  *
3619  * @param[in] actions
3620  *   Pointer to the list of actions.
3621  * @param[out] qrss
3622  *   Pointer to return the QUEUE/RSS action pointer; left untouched if no
3623  *   QUEUE/RSS action is found.
3624  * @param[out] encap_idx
3625  *   Pointer to the index of the encap action if it exists, otherwise the
3626  *   last action index.
3629  *
3630  * @return
3631  *   Total number of actions.
3632  */
3633 static int
3634 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3635                                        const struct rte_flow_action **qrss,
3636                                        int *encap_idx)
3637 {
3638         const struct rte_flow_action_raw_encap *raw_encap;
3639         int actions_n = 0;
3640         int raw_decap_idx = -1;
3641
3642         *encap_idx = -1;
3643         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3644                 switch (actions->type) {
3645                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3646                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3647                         *encap_idx = actions_n;
3648                         break;
3649                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3650                         raw_decap_idx = actions_n;
3651                         break;
3652                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3653                         raw_encap = actions->conf;
3654                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3655                                 *encap_idx = raw_decap_idx != -1 ?
3656                                                       raw_decap_idx : actions_n;
3657                         break;
3658                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3659                 case RTE_FLOW_ACTION_TYPE_RSS:
3660                         *qrss = actions;
3661                         break;
3662                 default:
3663                         break;
3664                 }
3665                 actions_n++;
3666         }
3667         if (*encap_idx == -1)
3668                 *encap_idx = actions_n;
3669         /* Count RTE_FLOW_ACTION_TYPE_END. */
3670         return actions_n + 1;
3671 }
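
/*
 * Example (illustrative) of the indexes computed above for the actions
 * [RAW_DECAP, RAW_ENCAP (size > decision size), QUEUE, END]:
 * *encap_idx = 0 (the decap/encap pair starts at the raw decap),
 * *qrss points at the QUEUE action, and the return value is 4
 * (three actions plus the terminating END).
 */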
3672
3673 /**
3674  * Check meter action from the action list.
3675  *
3676  * @param[in] actions
3677  *   Pointer to the list of actions.
3678  * @param[out] has_mtr
3679  *   Pointer to the meter exist flag.
3680  * @param[out] meter_id
3681  *   Pointer to the meter id.
3682  *
3683  * @return
3684  *   Total number of actions.
3685  */
3686 static int
3687 flow_check_meter_action(const struct rte_flow_action actions[],
3688                         bool *has_mtr,
3689                         uint32_t *meter_id)
3690 {
3691         const struct rte_flow_action_meter *mtr = NULL;
3692         int actions_n = 0;
3693
3694         MLX5_ASSERT(has_mtr);
3695         *has_mtr = false;
3696         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3697                 switch (actions->type) {
3698                 case RTE_FLOW_ACTION_TYPE_METER:
3699                         mtr = actions->conf;
3700                         *meter_id = mtr->mtr_id;
3701                         *has_mtr = true;
3702                         break;
3703                 default:
3704                         break;
3705                 }
3706                 actions_n++;
3707         }
3708         /* Count RTE_FLOW_ACTION_TYPE_END. */
3709         return actions_n + 1;
3710 }
3711
3712 /**
3713  * Check if the flow should be split due to hairpin.
3714  * The reason for the split is that the current HW can't
3715  * support encap and push-VLAN on Rx, so if a flow contains
3716  * these actions we move it to Tx.
3717  *
3718  * @param dev
3719  *   Pointer to Ethernet device.
3720  * @param[in] attr
3721  *   Flow rule attributes.
3722  * @param[in] actions
3723  *   Associated actions (list terminated by the END action).
3724  *
3725  * @return
3726  *   > 0 the number of actions and the flow should be split,
3727  *   0 when no split is required.
3728  */
3729 static int
3730 flow_check_hairpin_split(struct rte_eth_dev *dev,
3731                          const struct rte_flow_attr *attr,
3732                          const struct rte_flow_action actions[])
3733 {
3734         int queue_action = 0;
3735         int action_n = 0;
3736         int split = 0;
3737         const struct rte_flow_action_queue *queue;
3738         const struct rte_flow_action_rss *rss;
3739         const struct rte_flow_action_raw_encap *raw_encap;
3740         const struct rte_eth_hairpin_conf *conf;
3741
3742         if (!attr->ingress)
3743                 return 0;
3744         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3745                 switch (actions->type) {
3746                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3747                         queue = actions->conf;
3748                         if (queue == NULL)
3749                                 return 0;
3750                         conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3751                         if (conf == NULL || conf->tx_explicit != 0)
3752                                 return 0;
3753                         queue_action = 1;
3754                         action_n++;
3755                         break;
3756                 case RTE_FLOW_ACTION_TYPE_RSS:
3757                         rss = actions->conf;
3758                         if (rss == NULL || rss->queue_num == 0)
3759                                 return 0;
3760                         conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3761                         if (conf == NULL || conf->tx_explicit != 0)
3762                                 return 0;
3763                         queue_action = 1;
3764                         action_n++;
3765                         break;
3766                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3767                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3768                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3769                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3770                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3771                         split++;
3772                         action_n++;
3773                         break;
3774                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3775                         raw_encap = actions->conf;
3776                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3777                                 split++;
3778                         action_n++;
3779                         break;
3780                 default:
3781                         action_n++;
3782                         break;
3783                 }
3784         }
3785         if (split && queue_action)
3786                 return action_n;
3787         return 0;
3788 }
3789
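/*
 * Illustrative sketch (not part of the driver): an ingress action list
 * that the check above would consider for a hairpin split, since encap
 * is a Tx-only capability while QUEUE terminates on Rx. The names and
 * the queue index are assumptions; the split only actually happens when
 * the queue is a hairpin queue without explicit Tx rules on a live
 * device.
 */
static const struct rte_flow_action_queue flow_example_hairpin_queue = {
	.index = 0,
};
static const struct rte_flow_action flow_example_hairpin_actions[] __rte_unused = {
	{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE,
	  .conf = &flow_example_hairpin_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};
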
3790 /* Declare flow create/destroy prototype in advance. */
3791 static uint32_t
3792 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3793                  const struct rte_flow_attr *attr,
3794                  const struct rte_flow_item items[],
3795                  const struct rte_flow_action actions[],
3796                  bool external, struct rte_flow_error *error);
3797
3798 static void
3799 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3800                   uint32_t flow_idx);
3801
3802 int
3803 flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
3804                       struct mlx5_hlist_entry *entry,
3805                       uint64_t key, void *cb_ctx __rte_unused)
3806 {
3807         struct mlx5_flow_mreg_copy_resource *mcp_res =
3808                 container_of(entry, typeof(*mcp_res), hlist_ent);
3809
3810         return mcp_res->mark_id != key;
3811 }
3812
3813 struct mlx5_hlist_entry *
3814 flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
3815                        void *cb_ctx)
3816 {
3817         struct rte_eth_dev *dev = list->ctx;
3818         struct mlx5_priv *priv = dev->data->dev_private;
3819         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3820         struct mlx5_flow_mreg_copy_resource *mcp_res;
3821         struct rte_flow_error *error = ctx->error;
3822         uint32_t idx = 0;
3823         int ret;
3824         uint32_t mark_id = key;
3825         struct rte_flow_attr attr = {
3826                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3827                 .ingress = 1,
3828         };
3829         struct mlx5_rte_flow_item_tag tag_spec = {
3830                 .data = mark_id,
3831         };
3832         struct rte_flow_item items[] = {
3833                 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3834         };
3835         struct rte_flow_action_mark ftag = {
3836                 .id = mark_id,
3837         };
3838         struct mlx5_flow_action_copy_mreg cp_mreg = {
3839                 .dst = REG_B,
3840                 .src = REG_NON,
3841         };
3842         struct rte_flow_action_jump jump = {
3843                 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3844         };
3845         struct rte_flow_action actions[] = {
3846                 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3847         };
3848
3849         /* Fill the register fields in the flow. */
3850         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3851         if (ret < 0)
3852                 return NULL;
3853         tag_spec.id = ret;
3854         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3855         if (ret < 0)
3856                 return NULL;
3857         cp_mreg.src = ret;
3858         /* Provide the full width of FLAG specific value. */
3859         if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3860                 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3861         /* Build a new flow. */
3862         if (mark_id != MLX5_DEFAULT_COPY_ID) {
3863                 items[0] = (struct rte_flow_item){
3864                         .type = (enum rte_flow_item_type)
3865                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3866                         .spec = &tag_spec,
3867                 };
3868                 items[1] = (struct rte_flow_item){
3869                         .type = RTE_FLOW_ITEM_TYPE_END,
3870                 };
3871                 actions[0] = (struct rte_flow_action){
3872                         .type = (enum rte_flow_action_type)
3873                                 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3874                         .conf = &ftag,
3875                 };
3876                 actions[1] = (struct rte_flow_action){
3877                         .type = (enum rte_flow_action_type)
3878                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3879                         .conf = &cp_mreg,
3880                 };
3881                 actions[2] = (struct rte_flow_action){
3882                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3883                         .conf = &jump,
3884                 };
3885                 actions[3] = (struct rte_flow_action){
3886                         .type = RTE_FLOW_ACTION_TYPE_END,
3887                 };
3888         } else {
3889                 /* Default rule, wildcard match. */
3890                 attr.priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR;
3891                 items[0] = (struct rte_flow_item){
3892                         .type = RTE_FLOW_ITEM_TYPE_END,
3893                 };
3894                 actions[0] = (struct rte_flow_action){
3895                         .type = (enum rte_flow_action_type)
3896                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3897                         .conf = &cp_mreg,
3898                 };
3899                 actions[1] = (struct rte_flow_action){
3900                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3901                         .conf = &jump,
3902                 };
3903                 actions[2] = (struct rte_flow_action){
3904                         .type = RTE_FLOW_ACTION_TYPE_END,
3905                 };
3906         }
3907         /* Build a new entry. */
3908         mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3909         if (!mcp_res) {
3910                 rte_errno = ENOMEM;
3911                 return NULL;
3912         }
3913         mcp_res->idx = idx;
3914         mcp_res->mark_id = mark_id;
3915         /*
3916          * The copy Flows are not included in any list. These
3917          * ones are referenced from other Flows and cannot
3918          * be applied, removed, or deleted in arbitrary order
3919          * by list traversing.
3920          */
3921         mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3922                                          actions, false, error);
3923         if (!mcp_res->rix_flow) {
3924                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
3925                 return NULL;
3926         }
3927         return &mcp_res->hlist_ent;
3928 }
3929
3930 /**
3931  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3932  *
3933  * As mark_id is unique, if there's already a registered flow for the mark_id,
3934  * return by increasing the reference counter of the resource. Otherwise, create
3935  * the resource (mcp_res) and flow.
3936  *
3937  * Flow looks like,
3938  *   - If ingress port is ANY and reg_c[1] is mark_id,
3939  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3940  *
3941  * For default flow (zero mark_id), flow is like,
3942  *   - If ingress port is ANY,
3943  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
3944  *
3945  * @param dev
3946  *   Pointer to Ethernet device.
3947  * @param mark_id
3948  *   ID of MARK action, zero means default flow for META.
3949  * @param[out] error
3950  *   Perform verbose error reporting if not NULL.
3951  *
3952  * @return
3953  *   Associated resource on success, NULL otherwise and rte_errno is set.
3954  */
3955 static struct mlx5_flow_mreg_copy_resource *
3956 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3957                           struct rte_flow_error *error)
3958 {
3959         struct mlx5_priv *priv = dev->data->dev_private;
3960         struct mlx5_hlist_entry *entry;
3961         struct mlx5_flow_cb_ctx ctx = {
3962                 .dev = dev,
3963                 .error = error,
3964         };
3965
3966         /* Check if already registered. */
3967         MLX5_ASSERT(priv->mreg_cp_tbl);
3968         entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
3969         if (!entry)
3970                 return NULL;
3971         return container_of(entry, struct mlx5_flow_mreg_copy_resource,
3972                             hlist_ent);
3973 }
3974
3975 void
3976 flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
3977 {
3978         struct mlx5_flow_mreg_copy_resource *mcp_res =
3979                 container_of(entry, typeof(*mcp_res), hlist_ent);
3980         struct rte_eth_dev *dev = list->ctx;
3981         struct mlx5_priv *priv = dev->data->dev_private;
3982
3983         MLX5_ASSERT(mcp_res->rix_flow);
3984         flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3985         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3986 }
3987
3988 /**
3989  * Release flow in RX_CP_TBL.
3990  *
3991  * @param dev
3992  *   Pointer to Ethernet device.
3993  * @param flow
3994  *   Parent flow for which copying is provided.
3995  */
3996 static void
3997 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3998                           struct rte_flow *flow)
3999 {
4000         struct mlx5_flow_mreg_copy_resource *mcp_res;
4001         struct mlx5_priv *priv = dev->data->dev_private;
4002
4003         if (!flow->rix_mreg_copy)
4004                 return;
4005         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
4006                                  flow->rix_mreg_copy);
4007         if (!mcp_res || !priv->mreg_cp_tbl)
4008                 return;
4009         MLX5_ASSERT(mcp_res->rix_flow);
4010         mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
4011         flow->rix_mreg_copy = 0;
4012 }
4013
4014 /**
4015  * Remove the default copy action from RX_CP_TBL.
4016  *
4017  * This function is called from mlx5_dev_start(). Thread safety
4018  * is not guaranteed.
4019  *
4020  * @param dev
4021  *   Pointer to Ethernet device.
4022  */
4023 static void
4024 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
4025 {
4026         struct mlx5_hlist_entry *entry;
4027         struct mlx5_priv *priv = dev->data->dev_private;
4028
4029         /* Check if default flow is registered. */
4030         if (!priv->mreg_cp_tbl)
4031                 return;
4032         entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
4033                                   MLX5_DEFAULT_COPY_ID, NULL);
4034         if (!entry)
4035                 return;
4036         mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
4037 }
4038
4039 /**
4040  * Add the default copy action in RX_CP_TBL.
4041  *
4042  * This function is called from mlx5_dev_start(). Thread safety
4043  * is not guaranteed.
4044  *
4045  * @param dev
4046  *   Pointer to Ethernet device.
4047  * @param[out] error
4048  *   Perform verbose error reporting if not NULL.
4049  *
4050  * @return
4051  *   0 for success, negative value otherwise and rte_errno is set.
4052  */
4053 static int
4054 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
4055                                   struct rte_flow_error *error)
4056 {
4057         struct mlx5_priv *priv = dev->data->dev_private;
4058         struct mlx5_flow_mreg_copy_resource *mcp_res;
4059
4060         /* Check whether extensive metadata feature is engaged. */
4061         if (!priv->config.dv_flow_en ||
4062             priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4063             !mlx5_flow_ext_mreg_supported(dev) ||
4064             !priv->sh->dv_regc0_mask)
4065                 return 0;
4066         /*
4067          * Adding the default mreg copy flow may be called multiple times,
4068          * but it is only removed once in stop. Avoid registering it twice.
4069          */
4070         if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
4071                 return 0;
4072         mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
4073         if (!mcp_res)
4074                 return -rte_errno;
4075         return 0;
4076 }
4077
4078 /**
4079  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4080  *
4081  * All flows having a Q/RSS action should be split by
4082  * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the RX_CP_TBL
4083  * performs the following,
4084  *   - CQE->flow_tag := reg_c[1] (MARK)
4085  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4086  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
4087  * but there should be one flow per MARK ID set by the MARK action.
4088  *
4089  * For the aforementioned reason, if there's a MARK action in flow's action
4090  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
4091  * the MARK ID to CQE's flow_tag like,
4092  *   - If reg_c[1] is mark_id,
4093  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4094  *
4095  * For SET_META action which stores value in reg_c[0], as the destination is
4096  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
4097  * MARK ID means the default flow. The default flow looks like,
4098  *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4099  *
4100  * @param dev
4101  *   Pointer to Ethernet device.
4102  * @param flow
4103  *   Pointer to flow structure.
4104  * @param[in] actions
4105  *   Pointer to the list of actions.
4106  * @param[out] error
4107  *   Perform verbose error reporting if not NULL.
4108  *
4109  * @return
4110  *   0 on success, negative value otherwise and rte_errno is set.
4111  */
4112 static int
4113 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
4114                             struct rte_flow *flow,
4115                             const struct rte_flow_action *actions,
4116                             struct rte_flow_error *error)
4117 {
4118         struct mlx5_priv *priv = dev->data->dev_private;
4119         struct mlx5_dev_config *config = &priv->config;
4120         struct mlx5_flow_mreg_copy_resource *mcp_res;
4121         const struct rte_flow_action_mark *mark;
4122
4123         /* Check whether extensive metadata feature is engaged. */
4124         if (!config->dv_flow_en ||
4125             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4126             !mlx5_flow_ext_mreg_supported(dev) ||
4127             !priv->sh->dv_regc0_mask)
4128                 return 0;
4129         /* Find MARK action. */
4130         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4131                 switch (actions->type) {
4132                 case RTE_FLOW_ACTION_TYPE_FLAG:
4133                         mcp_res = flow_mreg_add_copy_action
4134                                 (dev, MLX5_FLOW_MARK_DEFAULT, error);
4135                         if (!mcp_res)
4136                                 return -rte_errno;
4137                         flow->rix_mreg_copy = mcp_res->idx;
4138                         return 0;
4139                 case RTE_FLOW_ACTION_TYPE_MARK:
4140                         mark = (const struct rte_flow_action_mark *)
4141                                 actions->conf;
4142                         mcp_res =
4143                                 flow_mreg_add_copy_action(dev, mark->id, error);
4144                         if (!mcp_res)
4145                                 return -rte_errno;
4146                         flow->rix_mreg_copy = mcp_res->idx;
4147                         return 0;
4148                 default:
4149                         break;
4150                 }
4151         }
4152         return 0;
4153 }
4154
4155 #define MLX5_MAX_SPLIT_ACTIONS 24
4156 #define MLX5_MAX_SPLIT_ITEMS 24
4157
4158 /**
4159  * Split the hairpin flow.
4160  * Since HW can't support encap and push-vlan on Rx, we move these
4161  * actions to Tx.
4162  * If the count action comes after the encap then we also
4163  * move the count action. In this case the count will also
4164  * measure the outer bytes.
4165  *
4166  * @param dev
4167  *   Pointer to Ethernet device.
4168  * @param[in] actions
4169  *   Associated actions (list terminated by the END action).
4170  * @param[out] actions_rx
4171  *   Rx flow actions.
4172  * @param[out] actions_tx
4173  *   Tx flow actions.
4174  * @param[out] pattern_tx
4175  *   The pattern items for the Tx flow.
4176  * @param[in] flow_id
4177  *   The flow ID connected to this flow.
4178  *
4179  * @return
4180  *   0 on success.
4181  */
4182 static int
4183 flow_hairpin_split(struct rte_eth_dev *dev,
4184                    const struct rte_flow_action actions[],
4185                    struct rte_flow_action actions_rx[],
4186                    struct rte_flow_action actions_tx[],
4187                    struct rte_flow_item pattern_tx[],
4188                    uint32_t flow_id)
4189 {
4190         const struct rte_flow_action_raw_encap *raw_encap;
4191         const struct rte_flow_action_raw_decap *raw_decap;
4192         struct mlx5_rte_flow_action_set_tag *set_tag;
4193         struct rte_flow_action *tag_action;
4194         struct mlx5_rte_flow_item_tag *tag_item;
4195         struct rte_flow_item *item;
4196         char *addr;
4197         int encap = 0;
4198
4199         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4200                 switch (actions->type) {
4201                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4202                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4203                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4204                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4205                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4206                         rte_memcpy(actions_tx, actions,
4207                                sizeof(struct rte_flow_action));
4208                         actions_tx++;
4209                         break;
4210                 case RTE_FLOW_ACTION_TYPE_COUNT:
4211                         if (encap) {
4212                                 rte_memcpy(actions_tx, actions,
4213                                            sizeof(struct rte_flow_action));
4214                                 actions_tx++;
4215                         } else {
4216                                 rte_memcpy(actions_rx, actions,
4217                                            sizeof(struct rte_flow_action));
4218                                 actions_rx++;
4219                         }
4220                         break;
4221                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4222                         raw_encap = actions->conf;
4223                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
4224                                 memcpy(actions_tx, actions,
4225                                        sizeof(struct rte_flow_action));
4226                                 actions_tx++;
4227                                 encap = 1;
4228                         } else {
4229                                 rte_memcpy(actions_rx, actions,
4230                                            sizeof(struct rte_flow_action));
4231                                 actions_rx++;
4232                         }
4233                         break;
4234                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4235                         raw_decap = actions->conf;
4236                         if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
4237                                 memcpy(actions_tx, actions,
4238                                        sizeof(struct rte_flow_action));
4239                                 actions_tx++;
4240                         } else {
4241                                 rte_memcpy(actions_rx, actions,
4242                                            sizeof(struct rte_flow_action));
4243                                 actions_rx++;
4244                         }
4245                         break;
4246                 default:
4247                         rte_memcpy(actions_rx, actions,
4248                                    sizeof(struct rte_flow_action));
4249                         actions_rx++;
4250                         break;
4251                 }
4252         }
4253         /* Add the set tag action and the end action for the Rx flow. */
4254         tag_action = actions_rx;
4255         tag_action->type = (enum rte_flow_action_type)
4256                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4257         actions_rx++;
4258         rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4259         actions_rx++;
4260         set_tag = (void *)actions_rx;
4261         *set_tag = (struct mlx5_rte_flow_action_set_tag) {
4262                 .id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL),
4263                 .data = flow_id,
4264         };
4265         MLX5_ASSERT(set_tag->id > REG_NON);
4266         tag_action->conf = set_tag;
4267         /* Create Tx item list. */
4268         rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4269         addr = (void *)&pattern_tx[2];
4270         item = pattern_tx;
4271         item->type = (enum rte_flow_item_type)
4272                      MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4273         tag_item = (void *)addr;
4274         tag_item->data = flow_id;
4275         tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4276         MLX5_ASSERT(tag_item->id > REG_NON);
4277         item->spec = tag_item;
4278         addr += sizeof(struct mlx5_rte_flow_item_tag);
4279         tag_item = (void *)addr;
4280         tag_item->data = UINT32_MAX;
4281         tag_item->id = UINT16_MAX;
4282         item->mask = tag_item;
4283         item->last = NULL;
4284         item++;
4285         item->type = RTE_FLOW_ITEM_TYPE_END;
4286         return 0;
4287 }
4288
4289 /**
4290  * The last stage of splitting chain, just creates the subflow
4291  * without any modification.
4292  *
4293  * @param[in] dev
4294  *   Pointer to Ethernet device.
4295  * @param[in] flow
4296  *   Parent flow structure pointer.
4297  * @param[in, out] sub_flow
4298  *   Pointer to return the created subflow, may be NULL.
4299  * @param[in] attr
4300  *   Flow rule attributes.
4301  * @param[in] items
4302  *   Pattern specification (list terminated by the END pattern item).
4303  * @param[in] actions
4304  *   Associated actions (list terminated by the END action).
4305  * @param[in] flow_split_info
4306  *   Pointer to flow split info structure.
4307  * @param[out] error
4308  *   Perform verbose error reporting if not NULL.
4309  * @return
4310  *   0 on success, negative value otherwise
4311  */
4312 static int
4313 flow_create_split_inner(struct rte_eth_dev *dev,
4314                         struct rte_flow *flow,
4315                         struct mlx5_flow **sub_flow,
4316                         const struct rte_flow_attr *attr,
4317                         const struct rte_flow_item items[],
4318                         const struct rte_flow_action actions[],
4319                         struct mlx5_flow_split_info *flow_split_info,
4320                         struct rte_flow_error *error)
4321 {
4322         struct mlx5_flow *dev_flow;
4323
4324         dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4325                                     flow_split_info->flow_idx, error);
4326         if (!dev_flow)
4327                 return -rte_errno;
4328         dev_flow->flow = flow;
4329         dev_flow->external = flow_split_info->external;
4330         dev_flow->skip_scale = flow_split_info->skip_scale;
4331         /* Subflow object was created, we must include it in the list. */
4332         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4333                       dev_flow->handle, next);
4334         /*
4335          * If dev_flow is one of the suffix flows, some actions in the
4336          * suffix flow may need the user-defined item layer flags; pass
4337          * the metadata Rx queue mark flag to the suffix flow as well.
4338          */
4339         if (flow_split_info->prefix_layers)
4340                 dev_flow->handle->layers = flow_split_info->prefix_layers;
4341         if (flow_split_info->prefix_mark)
4342                 dev_flow->handle->mark = 1;
4343         if (sub_flow)
4344                 *sub_flow = dev_flow;
4345 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
4346         dev_flow->dv.table_id = flow_split_info->table_id;
4347 #endif
4348         return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4349 }
4350
4351 /**
4352  * Split the meter flow.
4353  *
4354  * As the meter flow will be split into three sub flows, the actions
4355  * other than the meter action only make sense when the meter
4356  * accepts the packet. If the packet is to be dropped, no
4357  * additional actions should be taken.
4358  *
4359  * One special kind of action, which decapsulates the L3 tunnel
4360  * header, is placed in the prefix sub flow so as not to take the
4361  * L3 tunnel header into account.
4362  *
4363  * @param[in] dev
4364  *   Pointer to Ethernet device.
4365  * @param[in] flow
4366  *   Parent flow structure pointer.
4367  * @param[in] fm
4368  *   Pointer to flow meter structure.
4369  * @param[in] attr
4370  *   Flow rule attributes.
4371  * @param[in] items
4372  *   Pattern specification (list terminated by the END pattern item).
4373  * @param[out] sfx_items
4374  *   Suffix flow match items (list terminated by the END pattern item).
4375  * @param[in] actions
4376  *   Associated actions (list terminated by the END action).
4377  * @param[out] actions_sfx
4378  *   Suffix flow actions.
4379  * @param[out] actions_pre
4380  *   Prefix flow actions.
4381  * @param[out] error
4382  *   Perform verbose error reporting if not NULL.
4383  *
4384  * @return
4385  *   The flow ID on success, 0 otherwise and rte_errno is set.
4386  */
4387 static uint32_t
4388 flow_meter_split_prep(struct rte_eth_dev *dev,
4389                       struct rte_flow *flow,
4390                       struct mlx5_flow_meter_info *fm,
4391                       const struct rte_flow_attr *attr,
4392                       const struct rte_flow_item items[],
4393                       struct rte_flow_item sfx_items[],
4394                       const struct rte_flow_action actions[],
4395                       struct rte_flow_action actions_sfx[],
4396                       struct rte_flow_action actions_pre[],
4397                       struct rte_flow_error *error)
4398 {
4399         struct mlx5_priv *priv = dev->data->dev_private;
4400         struct rte_flow_action *tag_action = NULL;
4401         struct rte_flow_item *tag_item;
4402         struct mlx5_rte_flow_action_set_tag *set_tag;
4403         const struct rte_flow_action_raw_encap *raw_encap;
4404         const struct rte_flow_action_raw_decap *raw_decap;
4405         struct mlx5_rte_flow_item_tag *tag_item_spec;
4406         struct mlx5_rte_flow_item_tag *tag_item_mask;
4407         uint32_t tag_id = 0;
4408         bool copy_vlan = false;
4409         struct rte_flow_action *hw_mtr_action;
4410         struct rte_flow_action_jump *jump_data;
4411         struct rte_flow_action *action_pre_head = NULL;
4412         bool mtr_first = priv->sh->meter_aso_en &&
4413                         (attr->egress ||
4414                         (attr->transfer && priv->representor_id != UINT16_MAX));
4415         uint8_t mtr_id_offset = priv->mtr_reg_share ? MLX5_MTR_COLOR_BITS : 0;
4416         uint8_t mtr_reg_bits = priv->mtr_reg_share ?
4417                                 MLX5_MTR_IDLE_BITS_IN_COLOR_REG : MLX5_REG_BITS;
4418         uint32_t flow_id = 0;
4419         uint32_t flow_id_reversed = 0;
4420         uint8_t flow_id_bits = 0;
4421         int shift;
4422
4423         /* For ASO meter, meter must be before tag in TX direction. */
4424         if (mtr_first) {
4425                 action_pre_head = actions_pre++;
4426                 /* Leave space for tag action. */
4427                 tag_action = actions_pre++;
4428         }
4429         /* Prepare the actions for prefix and suffix flow. */
4430         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4431                 struct rte_flow_action *action_cur = NULL;
4432
4433                 switch (actions->type) {
4434                 case RTE_FLOW_ACTION_TYPE_METER:
4435                         if (mtr_first) {
4436                                 action_cur = action_pre_head;
4437                         } else {
4438                                 /* Leave space for tag action. */
4439                                 tag_action = actions_pre++;
4440                                 action_cur = actions_pre++;
4441                         }
4442                         break;
4443                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4444                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4445                         action_cur = actions_pre++;
4446                         break;
4447                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4448                         raw_encap = actions->conf;
4449                         if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4450                                 action_cur = actions_pre++;
4451                         break;
4452                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4453                         raw_decap = actions->conf;
4454                         if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4455                                 action_cur = actions_pre++;
4456                         break;
4457                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4458                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4459                         copy_vlan = true;
4460                         break;
4461                 default:
4462                         break;
4463                 }
4464                 if (!action_cur)
4465                         action_cur = actions_sfx++;
4466                 memcpy(action_cur, actions, sizeof(struct rte_flow_action));
4467         }
4468         /* Add end action to the actions. */
4469         actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4470         if (priv->sh->meter_aso_en) {
4471                 /*
4472                  * For ASO meter, an extra jump action must be added
4473                  * explicitly to jump from the meter to the policer table.
4474                  */
4475                 hw_mtr_action = actions_pre;
4476                 hw_mtr_action->type = RTE_FLOW_ACTION_TYPE_JUMP;
4477                 actions_pre++;
4478                 actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4479                 actions_pre++;
4480                 jump_data = (struct rte_flow_action_jump *)actions_pre;
4481                 jump_data->group = attr->transfer ?
4482                                 (MLX5_FLOW_TABLE_LEVEL_METER - 1) :
4483                                  MLX5_FLOW_TABLE_LEVEL_METER;
4484                 hw_mtr_action->conf = jump_data;
4485                 actions_pre = (struct rte_flow_action *)(jump_data + 1);
4486         } else {
4487                 actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4488                 actions_pre++;
4489         }
4490         /* Generate meter flow_id only if multiple flows per meter are supported. */
4491         mlx5_ipool_malloc(fm->flow_ipool, &tag_id);
4492         if (!tag_id)
4493                 return rte_flow_error_set(error, ENOMEM,
4494                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4495                                 "Failed to allocate meter flow id.");
4496         flow_id = tag_id - 1;
4497         flow_id_bits = MLX5_REG_BITS - __builtin_clz(flow_id);
4498         flow_id_bits = flow_id_bits ? flow_id_bits : 1;
4499         if ((flow_id_bits + priv->max_mtr_bits) > mtr_reg_bits) {
4500                 mlx5_ipool_free(fm->flow_ipool, tag_id);
4501                 return rte_flow_error_set(error, EINVAL,
4502                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4503                                 "Meter flow id exceeds max limit.");
4504         }
4505         if (flow_id_bits > priv->max_mtr_flow_bits)
4506                 priv->max_mtr_flow_bits = flow_id_bits;
4507         /* Prepare the suffix subflow items. */
4508         tag_item = sfx_items++;
4509         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4510                 int item_type = items->type;
4511
4512                 switch (item_type) {
4513                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4514                         memcpy(sfx_items, items, sizeof(*sfx_items));
4515                         sfx_items++;
4516                         break;
4517                 case RTE_FLOW_ITEM_TYPE_VLAN:
4518                         if (copy_vlan) {
4519                                 memcpy(sfx_items, items, sizeof(*sfx_items));
4520                                 /*
4521                                  * Convert to an internal match item; it
4522                                  * is used for VLAN push and set VID.
4523                                  */
4524                                 sfx_items->type = (enum rte_flow_item_type)
4525                                                   MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4526                                 sfx_items++;
4527                         }
4528                         break;
4529                 default:
4530                         break;
4531                 }
4532         }
4533         sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4534         sfx_items++;
4535         /* Build tag actions and items for meter_id/meter flow_id. */
4536         MLX5_ASSERT(tag_action);
4537         set_tag = (struct mlx5_rte_flow_action_set_tag *)actions_pre;
4538         tag_item_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4539         tag_item_mask = tag_item_spec + 1;
4540         /* Both flow_id and meter_id share the same register. */
4541         *set_tag = (struct mlx5_rte_flow_action_set_tag) {
4542                 .id = (enum modify_reg)mlx5_flow_get_reg_id(dev, MLX5_MTR_ID,
4543                                                             0, error),
4544                 .offset = mtr_id_offset,
4545                 .length = mtr_reg_bits,
4546                 .data = flow->meter,
4547         };
4548         /*
4549          * The color register bits used by flow_id grow from
4550          * MSB to LSB, so the flow_id value must be bit-reversed in RegC.
4551          */
4552         for (shift = 0; shift < flow_id_bits; shift++)
4553                 flow_id_reversed = (flow_id_reversed << 1) |
4554                               ((flow_id >> shift) & 0x1);
4555         set_tag->data |= flow_id_reversed << (mtr_reg_bits - flow_id_bits);
4556         tag_item_spec->id = set_tag->id;
4557         tag_item_spec->data = set_tag->data << mtr_id_offset;
4558         tag_item_mask->data = UINT32_MAX << mtr_id_offset;
4559         tag_action->type = (enum rte_flow_action_type)
4560                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4561         tag_action->conf = set_tag;
4562         tag_item->type = (enum rte_flow_item_type)
4563                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4564         tag_item->spec = tag_item_spec;
4565         tag_item->last = NULL;
4566         tag_item->mask = tag_item_mask;
4567         return tag_id;
4568 }
4569
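/*
 * Illustrative sketch (not part of the driver): the flow_id bit reversal
 * performed above, isolated as a standalone helper. The color register
 * bits used by flow_id grow from MSB to LSB, so e.g. flow_id 0b110 (6)
 * with flow_id_bits == 3 becomes 0b011 (3) before being shifted to the
 * top of the meter register field. The helper name is an assumption.
 */
static inline uint32_t
flow_example_reverse_flow_id(uint32_t flow_id, uint8_t flow_id_bits)
{
	uint32_t reversed = 0;
	int shift;

	for (shift = 0; shift < flow_id_bits; shift++)
		reversed = (reversed << 1) | ((flow_id >> shift) & 0x1);
	return reversed;
}
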
4570 /**
4571  * Split action list having QUEUE/RSS for metadata register copy.
4572  *
4573  * Once Q/RSS action is detected in user's action list, the flow action
4574  * should be split in order to copy metadata registers, which will happen in
4575  * RX_CP_TBL like,
4576  *   - CQE->flow_tag := reg_c[1] (MARK)
4577  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4578  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4579  * This is because the last action of each flow must be a terminal action
4580  * (QUEUE, RSS or DROP).
4581  *
4582  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4583  * stored and kept in the mlx5_flow structure per each sub_flow.
4584  *
4585  * The Q/RSS action is replaced with,
4586  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4587  * And the following JUMP action is added at the end,
4588  *   - JUMP, to RX_CP_TBL.
4589  *
4590  * A flow performing the remaining Q/RSS action will be created in RX_ACT_TBL by
4591  * flow_create_split_metadata() routine. The flow will look like,
4592  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4593  *
4594  * @param dev
4595  *   Pointer to Ethernet device.
4596  * @param[out] split_actions
4597  *   Pointer to store split actions to jump to CP_TBL.
4598  * @param[in] actions
4599  *   Pointer to the list of original flow actions.
4600  * @param[in] qrss
4601  *   Pointer to the Q/RSS action.
4602  * @param[in] actions_n
4603  *   Number of original actions.
4604  * @param[out] error
4605  *   Perform verbose error reporting if not NULL.
4606  *
4607  * @return
4608  *   non-zero unique flow_id on success, otherwise 0 and
4609  *   error/rte_errno are set.
4610  */
4611 static uint32_t
4612 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4613                           struct rte_flow_action *split_actions,
4614                           const struct rte_flow_action *actions,
4615                           const struct rte_flow_action *qrss,
4616                           int actions_n, struct rte_flow_error *error)
4617 {
4618         struct mlx5_priv *priv = dev->data->dev_private;
4619         struct mlx5_rte_flow_action_set_tag *set_tag;
4620         struct rte_flow_action_jump *jump;
4621         const int qrss_idx = qrss - actions;
4622         uint32_t flow_id = 0;
4623         int ret = 0;
4624
4625         /*
4626          * Given actions will be split
4627          * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4628          * - Add jump to mreg CP_TBL.
4629          * As a result, there will be one more action.
4630          */
4631         ++actions_n;
4632         memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4633         set_tag = (void *)(split_actions + actions_n);
4634         /*
4635          * If the tag action is not set to void (meaning this is not the
4636          * meter suffix flow), add the tag action, since the meter suffix
4637          * flow already has the tag added.
4638          */
4639         if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4640                 /*
4641                  * Allocate the new subflow ID. This one is unique within
4642                  * device and not shared with representors. Otherwise,
4643                  * we would have to resolve multi-thread access synch
4644                  * issue. Each flow on the shared device is appended
4645                  * with source vport identifier, so the resulting
4646                  * flows will be unique in the shared (by master and
4647                  * representors) domain even if they have coinciding
4648                  * IDs.
4649                  */
4650                 mlx5_ipool_malloc(priv->sh->ipool
4651                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4652                 if (!flow_id)
4653                         return rte_flow_error_set(error, ENOMEM,
4654                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4655                                                   NULL, "can't allocate id "
4656                                                   "for split Q/RSS subflow");
4657                 /* Internal SET_TAG action to set flow ID. */
4658                 *set_tag = (struct mlx5_rte_flow_action_set_tag){
4659                         .data = flow_id,
4660                 };
4661                 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4662                 if (ret < 0)
4663                         return ret;
4664                 set_tag->id = ret;
4665                 /* Construct new actions array. */
4666                 /* Replace QUEUE/RSS action. */
4667                 split_actions[qrss_idx] = (struct rte_flow_action){
4668                         .type = (enum rte_flow_action_type)
4669                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4670                         .conf = set_tag,
4671                 };
4672         }
4673         /* JUMP action to jump to mreg copy table (CP_TBL). */
4674         jump = (void *)(set_tag + 1);
4675         *jump = (struct rte_flow_action_jump){
4676                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4677         };
4678         split_actions[actions_n - 2] = (struct rte_flow_action){
4679                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
4680                 .conf = jump,
4681         };
4682         split_actions[actions_n - 1] = (struct rte_flow_action){
4683                 .type = RTE_FLOW_ACTION_TYPE_END,
4684         };
4685         return flow_id;
4686 }
4687
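/*
 * Illustrative sketch (not part of the driver): the buffer layout
 * flow_mreg_split_qrss_prep() expects. The SET_TAG and JUMP
 * configurations live past the end of the action array itself, so a
 * caller would have to size the allocation accordingly. The helper name
 * and the exact arithmetic are assumptions paraphrasing what the real
 * caller computes.
 */
static inline size_t
flow_example_split_actions_size(int actions_n)
{
	/* One extra action slot for the appended JUMP before END. */
	return sizeof(struct rte_flow_action) * (actions_n + 1) +
	       sizeof(struct mlx5_rte_flow_action_set_tag) +
	       sizeof(struct rte_flow_action_jump);
}
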
4688 /**
4689  * Extend the given action list for Tx metadata copy.
4690  *
4691  * Copy the given action list to the ext_actions and add flow metadata register
4692  * copy action in order to copy reg_a set by WQE to reg_c[0].
4693  *
4694  * @param[out] ext_actions
4695  *   Pointer to the extended action list.
4696  * @param[in] actions
4697  *   Pointer to the list of actions.
4698  * @param[in] actions_n
4699  *   Number of actions in the list.
4700  * @param[out] error
4701  *   Perform verbose error reporting if not NULL.
4702  * @param[in] encap_idx
4703  *   The encap action index.
4704  *
4705  * @return
4706  *   0 on success, negative value otherwise
4707  */
4708 static int
4709 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4710                        struct rte_flow_action *ext_actions,
4711                        const struct rte_flow_action *actions,
4712                        int actions_n, struct rte_flow_error *error,
4713                        int encap_idx)
4714 {
4715         struct mlx5_flow_action_copy_mreg *cp_mreg =
4716                 (struct mlx5_flow_action_copy_mreg *)
4717                         (ext_actions + actions_n + 1);
4718         int ret;
4719
4720         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4721         if (ret < 0)
4722                 return ret;
4723         cp_mreg->dst = ret;
4724         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4725         if (ret < 0)
4726                 return ret;
4727         cp_mreg->src = ret;
4728         if (encap_idx != 0)
4729                 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4730         if (encap_idx == actions_n - 1) {
4731                 ext_actions[actions_n - 1] = (struct rte_flow_action){
4732                         .type = (enum rte_flow_action_type)
4733                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4734                         .conf = cp_mreg,
4735                 };
4736                 ext_actions[actions_n] = (struct rte_flow_action){
4737                         .type = RTE_FLOW_ACTION_TYPE_END,
4738                 };
4739         } else {
4740                 ext_actions[encap_idx] = (struct rte_flow_action){
4741                         .type = (enum rte_flow_action_type)
4742                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4743                         .conf = cp_mreg,
4744                 };
4745                 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4746                                 sizeof(*ext_actions) * (actions_n - encap_idx));
4747         }
4748         return 0;
4749 }
4750
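/*
 * Illustrative sketch (not part of the driver): the matching buffer
 * layout for flow_mreg_tx_copy_prep() above; the COPY_MREG
 * configuration is stored right past the extended action array. The
 * helper name and arithmetic are assumptions for illustration.
 */
static inline size_t
flow_example_ext_actions_size(int actions_n)
{
	/* actions_n counts END; one extra slot holds the COPY_MREG action. */
	return sizeof(struct rte_flow_action) * (actions_n + 1) +
	       sizeof(struct mlx5_flow_action_copy_mreg);
}
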
4751 /**
4752  * Check the match action from the action list.
4753  *
4754  * @param[in] actions
4755  *   Pointer to the list of actions.
4756  * @param[in] attr
4757  *   Flow rule attributes.
4758  * @param[in] action
4759  *   The action whose presence is to be checked.
4760  * @param[out] match_action_pos
4761  *   Pointer to the position of the matched action if it exists, otherwise -1.
4762  * @param[out] qrss_action_pos
4763  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4764  * @param[out] modify_after_mirror
4765  *   Pointer to the flag of modify action after FDB mirroring.
4766  *
4767  * @return
4768  *   > 0 the total number of actions.
4769  *   0 if the match action is not found in the action list.
4770  */
4771 static int
4772 flow_check_match_action(const struct rte_flow_action actions[],
4773                         const struct rte_flow_attr *attr,
4774                         enum rte_flow_action_type action,
4775                         int *match_action_pos, int *qrss_action_pos,
4776                         int *modify_after_mirror)
4777 {
4778         const struct rte_flow_action_sample *sample;
4779         int actions_n = 0;
4780         uint32_t ratio = 0;
4781         int sub_type = 0;
4782         int flag = 0;
4783         int fdb_mirror = 0;
4784
4785         *match_action_pos = -1;
4786         *qrss_action_pos = -1;
4787         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4788                 if (actions->type == action) {
4789                         flag = 1;
4790                         *match_action_pos = actions_n;
4791                 }
4792                 switch (actions->type) {
4793                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4794                 case RTE_FLOW_ACTION_TYPE_RSS:
4795                         *qrss_action_pos = actions_n;
4796                         break;
4797                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
4798                         sample = actions->conf;
4799                         ratio = sample->ratio;
4800                         sub_type = ((const struct rte_flow_action *)
4801                                         (sample->actions))->type;
4802                         if (ratio == 1 && attr->transfer)
4803                                 fdb_mirror = 1;
4804                         break;
4805                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4806                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4807                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4808                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4809                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4810                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4811                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4812                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4813                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4814                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4815                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4816                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4817                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4818                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4819                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP:
4820                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP:
4821                 case RTE_FLOW_ACTION_TYPE_FLAG:
4822                 case RTE_FLOW_ACTION_TYPE_MARK:
4823                 case RTE_FLOW_ACTION_TYPE_SET_META:
4824                 case RTE_FLOW_ACTION_TYPE_SET_TAG:
4825                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
4826                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4827                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4828                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4829                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4830                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4831                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4832                 case RTE_FLOW_ACTION_TYPE_MODIFY_FIELD:
4833                         if (fdb_mirror)
4834                                 *modify_after_mirror = 1;
4835                         break;
4836                 default:
4837                         break;
4838                 }
4839                 actions_n++;
4840         }
4841         if (flag && fdb_mirror && !*modify_after_mirror) {
4842                 /* FDB mirroring is implemented with the destination
4843                  * array instead of the FLOW_SAMPLER object.
4844                  */
4845                 if (sub_type != RTE_FLOW_ACTION_TYPE_END)
4846                         flag = 0;
4847         }
4848         /* Count RTE_FLOW_ACTION_TYPE_END. */
4849         return flag ? actions_n + 1 : 0;
4850 }
4851
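/*
 * Illustrative sketch (not part of the driver): a minimal example of the
 * match check above, looking for a SAMPLE action. The function name,
 * the sample ratio, the sub-action list and the expected positions are
 * assumptions chosen for demonstration.
 */
static __rte_unused void
flow_example_check_match(void)
{
	const struct rte_flow_action sample_sub[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action_sample sample_conf = {
		.ratio = 2, /* sampling, not 1:1 FDB mirroring */
		.actions = sample_sub,
	};
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE,
		  .conf = &sample_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	int match_pos, qrss_pos, modify_after_mirror = 0;
	int n;

	n = flow_check_match_action(actions, &attr,
				    RTE_FLOW_ACTION_TYPE_SAMPLE,
				    &match_pos, &qrss_pos,
				    &modify_after_mirror);
	/* Expect n == 3 (END included), match_pos == 0, qrss_pos == 1. */
	(void)n;
}
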
4852 #define SAMPLE_SUFFIX_ITEM 2
4853
4854 /**
4855  * Split the sample flow.
4856  *
4857  * As the sample flow will be split into two sub flows, the sample
4858  * flow keeps the sample action while the other actions move to a new suffix flow.
4859  *
4860  * Also add a unique tag ID with a tag action in the sample flow;
4861  * the same tag ID will be used as a match in the suffix flow.
4862  *
4863  * @param dev
4864  *   Pointer to Ethernet device.
4865  * @param[in] add_tag
4866  *   Add extra tag action flag.
4867  * @param[out] sfx_items
4868  *   Suffix flow match items (list terminated by the END pattern item).
4869  * @param[in] actions
4870  *   Associated actions (list terminated by the END action).
4871  * @param[out] actions_sfx
4872  *   Suffix flow actions.
4873  * @param[out] actions_pre
4874  *   Prefix flow actions.
4875  * @param[in] actions_n
4876  *  The total number of actions.
4877  * @param[in] sample_action_pos
4878  *   The sample action position.
4879  * @param[in] qrss_action_pos
4880  *   The Queue/RSS action position.
4881  * @param[in] jump_table
4882  *   Add extra jump action flag.
4883  * @param[out] error
4884  *   Perform verbose error reporting if not NULL.
4885  *
4886  * @return
4887  *   0 or a unique flow_id on success, a negative errno value
4888  *   otherwise and rte_errno is set.
4889  */
4890 static int
4891 flow_sample_split_prep(struct rte_eth_dev *dev,
4892                        int add_tag,
4893                        struct rte_flow_item sfx_items[],
4894                        const struct rte_flow_action actions[],
4895                        struct rte_flow_action actions_sfx[],
4896                        struct rte_flow_action actions_pre[],
4897                        int actions_n,
4898                        int sample_action_pos,
4899                        int qrss_action_pos,
4900                        int jump_table,
4901                        struct rte_flow_error *error)
4902 {
4903         struct mlx5_priv *priv = dev->data->dev_private;
4904         struct mlx5_rte_flow_action_set_tag *set_tag;
4905         struct mlx5_rte_flow_item_tag *tag_spec;
4906         struct mlx5_rte_flow_item_tag *tag_mask;
4907         struct rte_flow_action_jump *jump_action;
4908         uint32_t tag_id = 0;
4909         int index;
4910         int append_index = 0;
4911         int ret;
4912
4913         if (sample_action_pos < 0)
4914                 return rte_flow_error_set(error, EINVAL,
4915                                           RTE_FLOW_ERROR_TYPE_ACTION,
4916                                           NULL, "invalid position of sample "
4917                                           "action in list");
4918         /* Prepare the actions for prefix and suffix flow. */
4919         if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4920                 index = qrss_action_pos;
4921                 /* Put the actions preceding the Queue/RSS action into the prefix flow. */
4922                 if (index != 0)
4923                         memcpy(actions_pre, actions,
4924                                sizeof(struct rte_flow_action) * index);
4925                 /* Put the others preceding the sample action into the prefix flow. */
4926                 if (sample_action_pos > index + 1)
4927                         memcpy(actions_pre + index, actions + index + 1,
4928                                sizeof(struct rte_flow_action) *
4929                                (sample_action_pos - index - 1));
4930                 index = sample_action_pos - 1;
4931                 /* Put Queue/RSS action into Suffix flow. */
4932                 memcpy(actions_sfx, actions + qrss_action_pos,
4933                        sizeof(struct rte_flow_action));
4934                 actions_sfx++;
4935         } else {
4936                 index = sample_action_pos;
4937                 if (index != 0)
4938                         memcpy(actions_pre, actions,
4939                                sizeof(struct rte_flow_action) * index);
4940         }
4941         /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
4942          * For CX6DX and above, metadata registers Cx preserve their value;
4943          * add an extra tag action for NIC-RX and the E-Switch Domain.
4944          */
4945         if (add_tag) {
4946                 /* Prepare the prefix tag action. */
4947                 append_index++;
4948                 set_tag = (void *)(actions_pre + actions_n + append_index);
4949                 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4950                 if (ret < 0)
4951                         return ret;
4952                 mlx5_ipool_malloc(priv->sh->ipool
4953                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4954                 *set_tag = (struct mlx5_rte_flow_action_set_tag) {
4955                         .id = ret,
4956                         .data = tag_id,
4957                 };
4958                 /* Prepare the suffix subflow items. */
4959                 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4960                 tag_spec->data = tag_id;
4961                 tag_spec->id = set_tag->id;
4962                 tag_mask = tag_spec + 1;
4963                 tag_mask->data = UINT32_MAX;
4964                 sfx_items[0] = (struct rte_flow_item){
4965                         .type = (enum rte_flow_item_type)
4966                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4967                         .spec = tag_spec,
4968                         .last = NULL,
4969                         .mask = tag_mask,
4970                 };
4971                 sfx_items[1] = (struct rte_flow_item){
4972                         .type = (enum rte_flow_item_type)
4973                                 RTE_FLOW_ITEM_TYPE_END,
4974                 };
4975                 /* Prepare the tag action in prefix subflow. */
4976                 actions_pre[index++] =
4977                         (struct rte_flow_action){
4978                         .type = (enum rte_flow_action_type)
4979                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4980                         .conf = set_tag,
4981                 };
4982         }
4983         memcpy(actions_pre + index, actions + sample_action_pos,
4984                sizeof(struct rte_flow_action));
4985         index += 1;
4986         /* For a modify action after the sample action in E-Switch mirroring,
4987          * add an extra jump action to the prefix subflow to jump into the
4988          * next table, then perform the modify action in the new table.
4989          */
4990         if (jump_table) {
4991                 /* Prepare the prefix jump action. */
4992                 append_index++;
4993                 jump_action = (void *)(actions_pre + actions_n + append_index);
4994                 jump_action->group = jump_table;
4995                 actions_pre[index++] =
4996                         (struct rte_flow_action){
4997                         .type = (enum rte_flow_action_type)
4998                                 RTE_FLOW_ACTION_TYPE_JUMP,
4999                         .conf = jump_action,
5000                 };
5001         }
5002         actions_pre[index] = (struct rte_flow_action){
5003                 .type = (enum rte_flow_action_type)
5004                         RTE_FLOW_ACTION_TYPE_END,
5005         };
5006         /* Put the actions after sample into Suffix flow. */
5007         memcpy(actions_sfx, actions + sample_action_pos + 1,
5008                sizeof(struct rte_flow_action) *
5009                (actions_n - sample_action_pos - 1));
5010         return tag_id;
5011 }
5012
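/*
 * Illustrative sketch only, never called by the driver: shape of an
 * application action list that flow_sample_split_prep() above splits.
 * COUNT and SAMPLE stay in the prefix subflow (followed by an internal
 * TAG action and END), while QUEUE is moved to the suffix subflow.
 * The configuration values below are hypothetical.
 */
static __rte_unused void
flow_sample_split_layout_sketch(void)
{
	static const struct rte_flow_action_count count_conf = { .id = 0 };
	static const struct rte_flow_action_sample sample_conf = {
		.ratio = 1, /* Sample every packet. */
	};
	static const struct rte_flow_action_queue queue_conf = { .index = 0 };
	const struct rte_flow_action app_actions[] = {
		/* Kept in the prefix subflow by the splitter. */
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample_conf },
		/* Moved to the suffix subflow by the splitter. */
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(app_actions);
}
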
5013 /**
5014  * The splitting for metadata feature.
5015  *
5016  * - A Q/RSS action on NIC Rx should be split in order to pass through
5017  *   the mreg copy table (RX_CP_TBL); the flow then jumps to the
5018  *   action table (RX_ACT_TBL), which holds the split Q/RSS action.
5019  *
5020  * - All the actions on NIC Tx should have a mreg copy action to
5021  *   copy reg_a from WQE to reg_c[0].
5022  *
5023  * @param dev
5024  *   Pointer to Ethernet device.
5025  * @param[in] flow
5026  *   Parent flow structure pointer.
5027  * @param[in] attr
5028  *   Flow rule attributes.
5029  * @param[in] items
5030  *   Pattern specification (list terminated by the END pattern item).
5031  * @param[in] actions
5032  *   Associated actions (list terminated by the END action).
5033  * @param[in] flow_split_info
5034  *   Pointer to flow split info structure.
5035  * @param[out] error
5036  *   Perform verbose error reporting if not NULL.
5037  * @return
5038  *   0 on success, negative value otherwise
5039  */
5040 static int
5041 flow_create_split_metadata(struct rte_eth_dev *dev,
5042                            struct rte_flow *flow,
5043                            const struct rte_flow_attr *attr,
5044                            const struct rte_flow_item items[],
5045                            const struct rte_flow_action actions[],
5046                            struct mlx5_flow_split_info *flow_split_info,
5047                            struct rte_flow_error *error)
5048 {
5049         struct mlx5_priv *priv = dev->data->dev_private;
5050         struct mlx5_dev_config *config = &priv->config;
5051         const struct rte_flow_action *qrss = NULL;
5052         struct rte_flow_action *ext_actions = NULL;
5053         struct mlx5_flow *dev_flow = NULL;
5054         uint32_t qrss_id = 0;
5055         int mtr_sfx = 0;
5056         size_t act_size;
5057         int actions_n;
5058         int encap_idx;
5059         int ret;
5060
5061         /* Check whether extensive metadata feature is engaged. */
5062         if (!config->dv_flow_en ||
5063             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
5064             !mlx5_flow_ext_mreg_supported(dev))
5065                 return flow_create_split_inner(dev, flow, NULL, attr, items,
5066                                                actions, flow_split_info, error);
5067         actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
5068                                                            &encap_idx);
5069         if (qrss) {
5070                 /* Exclude hairpin flows from splitting. */
5071                 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
5072                         const struct rte_flow_action_queue *queue;
5073
5074                         queue = qrss->conf;
5075                         if (mlx5_rxq_get_type(dev, queue->index) ==
5076                             MLX5_RXQ_TYPE_HAIRPIN)
5077                                 qrss = NULL;
5078                 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
5079                         const struct rte_flow_action_rss *rss;
5080
5081                         rss = qrss->conf;
5082                         if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
5083                             MLX5_RXQ_TYPE_HAIRPIN)
5084                                 qrss = NULL;
5085                 }
5086         }
5087         if (qrss) {
5088                 /* Check if it is in meter suffix table. */
5089                 mtr_sfx = attr->group == (attr->transfer ?
5090                           (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5091                           MLX5_FLOW_TABLE_LEVEL_SUFFIX);
5092                 /*
5093                  * A Q/RSS action on NIC Rx should be split in order to pass
5094                  * through the mreg copy table (RX_CP_TBL); the flow then
5095                  * jumps to the action table (RX_ACT_TBL) with the split action.
5096                  */
5097                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5098                            sizeof(struct rte_flow_action_set_tag) +
5099                            sizeof(struct rte_flow_action_jump);
5100                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5101                                           SOCKET_ID_ANY);
5102                 if (!ext_actions)
5103                         return rte_flow_error_set(error, ENOMEM,
5104                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5105                                                   NULL, "no memory to split "
5106                                                   "metadata flow");
5107                 /*
5108                  * If this is the meter suffix flow, the tag already exists.
5109                  * Set the tag action to void.
5110                  */
5111                 if (mtr_sfx)
5112                         ext_actions[qrss - actions].type =
5113                                                 RTE_FLOW_ACTION_TYPE_VOID;
5114                 else
5115                         ext_actions[qrss - actions].type =
5116                                                 (enum rte_flow_action_type)
5117                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
5118                 /*
5119                  * Create the new action list with the Q/RSS action removed
5120                  * and a set tag plus a jump to the register copy table
5121                  * (RX_CP_TBL) appended. The unique tag ID is preallocated
5122                  * in advance here, because it is needed for the set tag action.
5123                  */
5124                 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
5125                                                     qrss, actions_n, error);
5126                 if (!mtr_sfx && !qrss_id) {
5127                         ret = -rte_errno;
5128                         goto exit;
5129                 }
5130         } else if (attr->egress && !attr->transfer) {
5131                 /*
5132                  * All the actions on NIC Tx should have a metadata register
5133                  * copy action to copy reg_a from the WQE to reg_c[meta].
5134                  */
5135                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
5136                            sizeof(struct mlx5_flow_action_copy_mreg);
5137                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
5138                                           SOCKET_ID_ANY);
5139                 if (!ext_actions)
5140                         return rte_flow_error_set(error, ENOMEM,
5141                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5142                                                   NULL, "no memory to split "
5143                                                   "metadata flow");
5144                 /* Create the action list appended with copy register. */
5145                 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
5146                                              actions_n, error, encap_idx);
5147                 if (ret < 0)
5148                         goto exit;
5149         }
5150         /* Add the unmodified original or prefix subflow. */
5151         ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5152                                       items, ext_actions ? ext_actions :
5153                                       actions, flow_split_info, error);
5154         if (ret < 0)
5155                 goto exit;
5156         MLX5_ASSERT(dev_flow);
5157         if (qrss) {
5158                 const struct rte_flow_attr q_attr = {
5159                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5160                         .ingress = 1,
5161                 };
5162                 /* Internal PMD action to set register. */
5163                 struct mlx5_rte_flow_item_tag q_tag_spec = {
5164                         .data = qrss_id,
5165                         .id = REG_NON,
5166                 };
5167                 struct rte_flow_item q_items[] = {
5168                         {
5169                                 .type = (enum rte_flow_item_type)
5170                                         MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5171                                 .spec = &q_tag_spec,
5172                                 .last = NULL,
5173                                 .mask = NULL,
5174                         },
5175                         {
5176                                 .type = RTE_FLOW_ITEM_TYPE_END,
5177                         },
5178                 };
5179                 struct rte_flow_action q_actions[] = {
5180                         {
5181                                 .type = qrss->type,
5182                                 .conf = qrss->conf,
5183                         },
5184                         {
5185                                 .type = RTE_FLOW_ACTION_TYPE_END,
5186                         },
5187                 };
5188                 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5189
5190                 /*
5191                  * Configure the tag item only if there is no meter subflow.
5192                  * Since the tag is already set in the meter suffix subflow,
5193                  * we can just use the meter suffix items as-is.
5194                  */
5195                 if (qrss_id) {
5196                         /* Not meter subflow. */
5197                         MLX5_ASSERT(!mtr_sfx);
5198                         /*
5199                  * Put the unique ID in the prefix flow because it is
5200                  * destroyed after the suffix flow; the ID is freed only
5201                  * once no actual flows reference it, at which point
5202                  * identifier reallocation becomes possible (for example,
5203                  * for other flows in other threads).
5204                          */
5205                         dev_flow->handle->split_flow_id = qrss_id;
5206                         ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5207                                                    error);
5208                         if (ret < 0)
5209                                 goto exit;
5210                         q_tag_spec.id = ret;
5211                 }
5212                 dev_flow = NULL;
5213                 /* Add suffix subflow to execute Q/RSS. */
5214                 flow_split_info->prefix_layers = layers;
5215                 flow_split_info->prefix_mark = 0;
5216                 ret = flow_create_split_inner(dev, flow, &dev_flow,
5217                                               &q_attr, mtr_sfx ? items :
5218                                               q_items, q_actions,
5219                                               flow_split_info, error);
5220                 if (ret < 0)
5221                         goto exit;
5222                 /* Clear qrss_id; it is freed at exit only on failure. */
5223                 qrss_id = 0;
5224                 MLX5_ASSERT(dev_flow);
5225         }
5226
5227 exit:
5228         /*
5229          * We do not destroy the partially created sub_flows in case of error.
5230          * They are included in the parent flow list and will be destroyed
5231          * by flow_drv_destroy.
5232          */
5233         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5234                         qrss_id);
5235         mlx5_free(ext_actions);
5236         return ret;
5237 }
5238
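/*
 * Illustrative sketch only, never called by the driver: shape of the
 * prefix action list built by flow_create_split_metadata() above when
 * it replaces an application Q/RSS action. The register and flow id
 * values are hypothetical; the real ones come from mlx5_flow_get_reg_id()
 * and the RSS expansion flow id pool.
 */
static __rte_unused void
flow_metadata_split_layout_sketch(void)
{
	static const struct mlx5_rte_flow_action_set_tag set_tag = {
		.id = REG_C_2, /* Hypothetical metadata register. */
		.data = 1,     /* Hypothetical unique flow id. */
	};
	static const struct rte_flow_action_jump jump = {
		.group = MLX5_FLOW_MREG_CP_TABLE_GROUP, /* RX_CP_TBL. */
	};
	const struct rte_flow_action prefix_actions[] = {
		/* Application actions preceding Q/RSS stay in place here. */
		{ .type = (enum rte_flow_action_type)
			  MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = &set_tag },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(prefix_actions);
}
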
5239 /**
5240  * The splitting for meter feature.
5241  *
5242  * - The meter flow will be split into two flows, a prefix and a
5243  *   suffix flow. Packets are meaningful only if they pass the
5244  *   prefix meter action.
5245  *
5246  * - Reg_C_5 is used for the packet to match between the prefix
5247  *   and suffix flows.
5248  *
5249  * @param dev
5250  *   Pointer to Ethernet device.
5251  * @param[in] flow
5252  *   Parent flow structure pointer.
5253  * @param[in] attr
5254  *   Flow rule attributes.
5255  * @param[in] items
5256  *   Pattern specification (list terminated by the END pattern item).
5257  * @param[in] actions
5258  *   Associated actions (list terminated by the END action).
5259  * @param[in] flow_split_info
5260  *   Pointer to flow split info structure.
5261  * @param[out] error
5262  *   Perform verbose error reporting if not NULL.
5263  * @return
5264  *   0 on success, negative value otherwise
5265  */
5266 static int
5267 flow_create_split_meter(struct rte_eth_dev *dev,
5268                         struct rte_flow *flow,
5269                         const struct rte_flow_attr *attr,
5270                         const struct rte_flow_item items[],
5271                         const struct rte_flow_action actions[],
5272                         struct mlx5_flow_split_info *flow_split_info,
5273                         struct rte_flow_error *error)
5274 {
5275         struct mlx5_priv *priv = dev->data->dev_private;
5276         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5277         struct rte_flow_action *sfx_actions = NULL;
5278         struct rte_flow_action *pre_actions = NULL;
5279         struct rte_flow_item *sfx_items = NULL;
5280         struct mlx5_flow *dev_flow = NULL;
5281         struct rte_flow_attr sfx_attr = *attr;
5282         struct mlx5_flow_meter_info *fm = NULL;
5283         bool has_mtr = false;
5284         uint32_t meter_id;
5285         uint32_t mtr_idx = 0;
5286         uint32_t mtr_tag_id = 0;
5287         size_t act_size;
5288         size_t item_size;
5289         int actions_n = 0;
5290         int ret = 0;
5291
5292         if (priv->mtr_en)
5293                 actions_n = flow_check_meter_action(actions, &has_mtr,
5294                                                     &meter_id);
5295         if (has_mtr) {
5296                 if (flow->meter) {
5297                         fm = flow_dv_meter_find_by_idx(priv, flow->meter);
5298                         if (!fm)
5299                                 return rte_flow_error_set(error, EINVAL,
5300                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5301                                                 NULL, "Meter not found.");
5302                 } else {
5303                         fm = mlx5_flow_meter_find(priv, meter_id, &mtr_idx);
5304                         if (!fm)
5305                                 return rte_flow_error_set(error, EINVAL,
5306                                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5307                                                 NULL, "Meter not found.");
5308                         ret = mlx5_flow_meter_attach(priv, fm,
5309                                                      &sfx_attr, error);
5310                         if (ret)
5311                                 return -rte_errno;
5312                         flow->meter = mtr_idx;
5313                 }
5314                 wks->fm = fm;
5315                 /* Prefix actions: meter, decap, encap, tag, jump, end. */
5316                 act_size = sizeof(struct rte_flow_action) * (actions_n + 6) +
5317                            sizeof(struct mlx5_rte_flow_action_set_tag) +
5318                            sizeof(struct rte_flow_action_jump);
5319                 /* Suffix items: tag, vlan, port id, end. */
5320 #define METER_SUFFIX_ITEM 4
5321                 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5322                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5323                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5324                                           0, SOCKET_ID_ANY);
5325                 if (!sfx_actions)
5326                         return rte_flow_error_set(error, ENOMEM,
5327                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5328                                                   NULL, "no memory to split "
5329                                                   "meter flow");
5330                 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5331                              act_size);
5332                 pre_actions = sfx_actions + actions_n;
5333                 mtr_tag_id = flow_meter_split_prep(dev, flow, fm, &sfx_attr,
5334                                                    items, sfx_items, actions,
5335                                                    sfx_actions, pre_actions,
5336                                                    error);
5337                 if (!mtr_tag_id) {
5338                         ret = -rte_errno;
5339                         goto exit;
5340                 }
5341                 /* Add the prefix subflow. */
5342                 flow_split_info->prefix_mark = 0;
5343                 ret = flow_create_split_inner(dev, flow, &dev_flow,
5344                                               attr, items, pre_actions,
5345                                               flow_split_info, error);
5346                 if (ret) {
5347                         mlx5_ipool_free(fm->flow_ipool, mtr_tag_id);
5348                         ret = -rte_errno;
5349                         goto exit;
5350                 }
5351                 dev_flow->handle->split_flow_id = mtr_tag_id;
5352                 dev_flow->handle->is_meter_flow_id = 1;
5353                 /* Set the suffix group attribute. */
5354                 sfx_attr.group = sfx_attr.transfer ?
5355                                 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5356                                  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
5357                 flow_split_info->prefix_layers =
5358                                 flow_get_prefix_layer_flags(dev_flow);
5359                 flow_split_info->prefix_mark = dev_flow->handle->mark;
5360         }
5361         /* Add the suffix subflow. */
5362         ret = flow_create_split_metadata(dev, flow,
5363                                          &sfx_attr, sfx_items ?
5364                                          sfx_items : items,
5365                                          sfx_actions ? sfx_actions : actions,
5366                                          flow_split_info, error);
5367 exit:
5368         if (sfx_actions)
5369                 mlx5_free(sfx_actions);
5370         return ret;
5371 }
5372
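/*
 * Illustrative sketch only, never called by the driver: prefix action
 * list shape produced by the meter split above for a [METER, QUEUE]
 * rule. The meter and flow ids are hypothetical; REG_C_5 carries the
 * match value between the prefix and suffix flows as described above.
 */
static __rte_unused void
flow_meter_split_layout_sketch(void)
{
	static const struct rte_flow_action_meter meter = {
		.mtr_id = 1, /* Hypothetical meter id. */
	};
	static const struct mlx5_rte_flow_action_set_tag set_tag = {
		.id = REG_C_5,
		.data = 1, /* Hypothetical unique flow id. */
	};
	static const struct rte_flow_action_jump jump = {
		.group = MLX5_FLOW_TABLE_LEVEL_SUFFIX,
	};
	const struct rte_flow_action prefix_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
		{ .type = (enum rte_flow_action_type)
			  MLX5_RTE_FLOW_ACTION_TYPE_TAG, .conf = &set_tag },
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(prefix_actions);
}
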
5373 /**
5374  * The splitting for sample feature.
5375  *
5376  * Once a Sample action is detected in the action list, the flow actions
5377  * are split into a prefix sub flow and a suffix sub flow.
5378  *
5379  * The original items remain in the prefix sub flow. All actions preceding
5380  * the sample action, as well as the sample action itself, are copied to
5381  * the prefix sub flow; the actions following the sample action are copied
5382  * to the suffix sub flow, where the Queue action is always located.
5383  *
5384  * To make packets from the prefix sub flow match the suffix sub flow, an
5385  * extra tag action is added to the prefix sub flow, and the suffix sub
5386  * flow uses a tag item with the unique flow id.
5387  *
5388  * @param dev
5389  *   Pointer to Ethernet device.
5390  * @param[in] flow
5391  *   Parent flow structure pointer.
5392  * @param[in] attr
5393  *   Flow rule attributes.
5394  * @param[in] items
5395  *   Pattern specification (list terminated by the END pattern item).
5396  * @param[in] actions
5397  *   Associated actions (list terminated by the END action).
5398  * @param[in] flow_split_info
5399  *   Pointer to flow split info structure.
5400  * @param[out] error
5401  *   Perform verbose error reporting if not NULL.
5402  * @return
5403  *   0 on success, negative value otherwise
5404  */
5405 static int
5406 flow_create_split_sample(struct rte_eth_dev *dev,
5407                          struct rte_flow *flow,
5408                          const struct rte_flow_attr *attr,
5409                          const struct rte_flow_item items[],
5410                          const struct rte_flow_action actions[],
5411                          struct mlx5_flow_split_info *flow_split_info,
5412                          struct rte_flow_error *error)
5413 {
5414         struct mlx5_priv *priv = dev->data->dev_private;
5415         struct rte_flow_action *sfx_actions = NULL;
5416         struct rte_flow_action *pre_actions = NULL;
5417         struct rte_flow_item *sfx_items = NULL;
5418         struct mlx5_flow *dev_flow = NULL;
5419         struct rte_flow_attr sfx_attr = *attr;
5420 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5421         struct mlx5_flow_dv_sample_resource *sample_res;
5422         struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5423         struct mlx5_flow_tbl_resource *sfx_tbl;
5424 #endif
5425         size_t act_size;
5426         size_t item_size;
5427         uint32_t fdb_tx = 0;
5428         int32_t tag_id = 0;
5429         int actions_n = 0;
5430         int sample_action_pos;
5431         int qrss_action_pos;
5432         int add_tag = 0;
5433         int modify_after_mirror = 0;
5434         uint16_t jump_table = 0;
5435         const uint32_t next_ft_step = 1;
5436         int ret = 0;
5437
5438         if (priv->sampler_en)
5439                 actions_n = flow_check_match_action(actions, attr,
5440                                         RTE_FLOW_ACTION_TYPE_SAMPLE,
5441                                         &sample_action_pos, &qrss_action_pos,
5442                                         &modify_after_mirror);
5443         if (actions_n) {
5444                 /* The prefix actions must include sample, tag and end. */
5445                 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5446                            + sizeof(struct mlx5_rte_flow_action_set_tag);
5447                 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5448                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5449                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5450                                           item_size), 0, SOCKET_ID_ANY);
5451                 if (!sfx_actions)
5452                         return rte_flow_error_set(error, ENOMEM,
5453                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5454                                                   NULL, "no memory to split "
5455                                                   "sample flow");
5456                 /* The representor_id is -1 for uplink. */
5457                 fdb_tx = (attr->transfer && priv->representor_id != -1);
5458                 /*
5459                  * When reg_c_preserve is set, metadata registers Cx preserve
5460                  * their value even through packet duplication.
5461                  */
5462                 add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
5463                 if (add_tag)
5464                         sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5465                                         + act_size);
5466                 if (modify_after_mirror)
5467                         jump_table = attr->group * MLX5_FLOW_TABLE_FACTOR +
5468                                      next_ft_step;
5469                 pre_actions = sfx_actions + actions_n;
5470                 tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
5471                                                 actions, sfx_actions,
5472                                                 pre_actions, actions_n,
5473                                                 sample_action_pos,
5474                                                 qrss_action_pos, jump_table,
5475                                                 error);
5476                 if (tag_id < 0 || (add_tag && !tag_id)) {
5477                         ret = -rte_errno;
5478                         goto exit;
5479                 }
5480                 if (modify_after_mirror)
5481                         flow_split_info->skip_scale =
5482                                         1 << MLX5_SCALE_JUMP_FLOW_GROUP_BIT;
5483                 /* Add the prefix subflow. */
5484                 ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5485                                               items, pre_actions,
5486                                               flow_split_info, error);
5487                 if (ret) {
5488                         ret = -rte_errno;
5489                         goto exit;
5490                 }
5491                 dev_flow->handle->split_flow_id = tag_id;
5492 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5493                 if (!modify_after_mirror) {
5494                         /* Set the sfx group attr. */
5495                         sample_res = (struct mlx5_flow_dv_sample_resource *)
5496                                                 dev_flow->dv.sample_res;
5497                         sfx_tbl = (struct mlx5_flow_tbl_resource *)
5498                                                 sample_res->normal_path_tbl;
5499                         sfx_tbl_data = container_of(sfx_tbl,
5500                                                 struct mlx5_flow_tbl_data_entry,
5501                                                 tbl);
5502                         sfx_attr.group = sfx_attr.transfer ?
5503                         (sfx_tbl_data->level - 1) : sfx_tbl_data->level;
5504                 } else {
5505                         MLX5_ASSERT(attr->transfer);
5506                         sfx_attr.group = jump_table;
5507                 }
5508                 flow_split_info->prefix_layers =
5509                                 flow_get_prefix_layer_flags(dev_flow);
5510                 flow_split_info->prefix_mark = dev_flow->handle->mark;
5511                 /* The suffix group level has already been scaled with the
5512                  * factor; set MLX5_SCALE_FLOW_GROUP_BIT of skip_scale to 1
5513                  * to avoid scaling again in translation.
5514                  */
5515                 flow_split_info->skip_scale = 1 << MLX5_SCALE_FLOW_GROUP_BIT;
5516 #endif
5517         }
5518         /* Add the suffix subflow. */
5519         ret = flow_create_split_meter(dev, flow, &sfx_attr,
5520                                       sfx_items ? sfx_items : items,
5521                                       sfx_actions ? sfx_actions : actions,
5522                                       flow_split_info, error);
5523 exit:
5524         if (sfx_actions)
5525                 mlx5_free(sfx_actions);
5526         return ret;
5527 }
5528
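/*
 * Illustrative sketch only, never called by the driver: an E-Switch
 * mirroring rule with a modify action after SAMPLE. For such a rule the
 * code above computes jump_table as
 * attr->group * MLX5_FLOW_TABLE_FACTOR + 1 and performs the modify
 * action there. The port ids and MAC address are hypothetical.
 */
static __rte_unused void
flow_sample_mirror_sketch(void)
{
	static const struct rte_flow_action_port_id mirror_port = { .id = 1 };
	static const struct rte_flow_action mirror_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	static const struct rte_flow_action_sample sample = {
		.ratio = 1, /* Mirror every packet. */
		.actions = mirror_actions,
	};
	static const struct rte_flow_action_set_mac set_mac = {
		.mac_addr = { 0x02, 0, 0, 0, 0, 0x01 },
	};
	static const struct rte_flow_action_port_id fwd_port = { .id = 2 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		/* Modify action after the mirror: triggers the extra jump. */
		{ .type = RTE_FLOW_ACTION_TYPE_SET_MAC_SRC, .conf = &set_mac },
		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &fwd_port },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	RTE_SET_USED(actions);
}
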
5529 /**
5530  * Split the flow into a set of subflows. The splitters might be
5531  * linked in a chain, like this:
5532  * flow_create_split_outer() calls:
5533  *   flow_create_split_meter() calls:
5534  *     flow_create_split_metadata(meter_subflow_0) calls:
5535  *       flow_create_split_inner(metadata_subflow_0)
5536  *       flow_create_split_inner(metadata_subflow_1)
5537  *       flow_create_split_inner(metadata_subflow_2)
5538  *     flow_create_split_metadata(meter_subflow_1) calls:
5539  *       flow_create_split_inner(metadata_subflow_0)
5540  *       flow_create_split_inner(metadata_subflow_1)
5541  *       flow_create_split_inner(metadata_subflow_2)
5542  *
5543  * This provides a flexible way to add new levels of flow splitting.
5544  * All successfully created subflows are included in the parent
5545  * flow dev_flow list.
5546  *
5547  * @param dev
5548  *   Pointer to Ethernet device.
5549  * @param[in] flow
5550  *   Parent flow structure pointer.
5551  * @param[in] attr
5552  *   Flow rule attributes.
5553  * @param[in] items
5554  *   Pattern specification (list terminated by the END pattern item).
5555  * @param[in] actions
5556  *   Associated actions (list terminated by the END action).
5557  * @param[in] flow_split_info
5558  *   Pointer to flow split info structure.
5559  * @param[out] error
5560  *   Perform verbose error reporting if not NULL.
5561  * @return
5562  *   0 on success, negative value otherwise
5563  */
5564 static int
5565 flow_create_split_outer(struct rte_eth_dev *dev,
5566                         struct rte_flow *flow,
5567                         const struct rte_flow_attr *attr,
5568                         const struct rte_flow_item items[],
5569                         const struct rte_flow_action actions[],
5570                         struct mlx5_flow_split_info *flow_split_info,
5571                         struct rte_flow_error *error)
5572 {
5573         int ret;
5574
5575         ret = flow_create_split_sample(dev, flow, attr, items,
5576                                        actions, flow_split_info, error);
5577         MLX5_ASSERT(ret <= 0);
5578         return ret;
5579 }
5580
5581 static struct mlx5_flow_tunnel *
5582 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5583                       const struct rte_flow_attr *attr,
5584                       const struct rte_flow_item items[],
5585                       const struct rte_flow_action actions[])
5586 {
5587         struct mlx5_flow_tunnel *tunnel;
5588
5589 #pragma GCC diagnostic push
5590 #pragma GCC diagnostic ignored "-Wcast-qual"
5591         if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5592                 tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5593         else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5594                 tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5595         else
5596                 tunnel = NULL;
5597 #pragma GCC diagnostic pop
5598
5599         return tunnel;
5600 }
5601
5602 /**
5603  * Adjust flow RSS workspace if needed.
5604  *
5605  * @param wks
5606  *   Pointer to thread flow work space.
5607  * @param rss_desc
5608  *   Pointer to RSS descriptor.
5609  * @param[in] nrssq_num
5610  *   New RSS queue number.
5611  *
5612  * @return
5613  *   0 on success, -1 otherwise and rte_errno is set.
5614  */
5615 static int
5616 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5617                           struct mlx5_flow_rss_desc *rss_desc,
5618                           uint32_t nrssq_num)
5619 {
5620         if (likely(nrssq_num <= wks->rssq_num))
5621                 return 0;
5622         uint16_t *queue = realloc(rss_desc->queue,
5623                           sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
5624         if (!queue) {
5625                 rte_errno = ENOMEM;
5626                 return -1;
5627         }
5628         rss_desc->queue = queue;
5629         wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
5630         return 0;
5630 }
5631
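/*
 * Illustrative sketch only, never called by the driver: the queue array
 * grows to an even-aligned capacity, so with the default capacity of
 * MLX5_RSSQ_DEFAULT_NUM entries a request for 33 queues yields
 * RTE_ALIGN(33, 2) == 34 slots. The queue count is hypothetical.
 */
static __rte_unused void
flow_rss_workspace_adjust_sketch(struct mlx5_flow_workspace *wks)
{
	if (flow_rss_workspace_adjust(wks, &wks->rss_desc, 33) == 0)
		MLX5_ASSERT(wks->rssq_num >= 33);
}
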
5632 /**
5633  * Create a flow and add it to @p list.
5634  *
5635  * @param dev
5636  *   Pointer to Ethernet device.
5637  * @param list
5638  *   Pointer to a TAILQ flow list. If this parameter is NULL,
5639  *   no list insertion occurs; the flow is only created and
5640  *   it is the caller's responsibility to track the
5641  *   created flow.
5642  * @param[in] attr
5643  *   Flow rule attributes.
5644  * @param[in] items
5645  *   Pattern specification (list terminated by the END pattern item).
5646  * @param[in] actions
5647  *   Associated actions (list terminated by the END action).
5648  * @param[in] external
5649  *   This flow rule is created by a request external to the PMD.
5650  * @param[out] error
5651  *   Perform verbose error reporting if not NULL.
5652  *
5653  * @return
5654  *   A flow index on success, 0 otherwise and rte_errno is set.
5655  */
5656 static uint32_t
5657 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5658                  const struct rte_flow_attr *attr,
5659                  const struct rte_flow_item items[],
5660                  const struct rte_flow_action original_actions[],
5661                  bool external, struct rte_flow_error *error)
5662 {
5663         struct mlx5_priv *priv = dev->data->dev_private;
5664         struct rte_flow *flow = NULL;
5665         struct mlx5_flow *dev_flow;
5666         const struct rte_flow_action_rss *rss = NULL;
5667         struct mlx5_translated_action_handle
5668                 indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
5669         int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
5670         union {
5671                 struct mlx5_flow_expand_rss buf;
5672                 uint8_t buffer[2048];
5673         } expand_buffer;
5674         union {
5675                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5676                 uint8_t buffer[2048];
5677         } actions_rx;
5678         union {
5679                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5680                 uint8_t buffer[2048];
5681         } actions_hairpin_tx;
5682         union {
5683                 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5684                 uint8_t buffer[2048];
5685         } items_tx;
5686         struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5687         struct mlx5_flow_rss_desc *rss_desc;
5688         const struct rte_flow_action *p_actions_rx;
5689         uint32_t i;
5690         uint32_t idx = 0;
5691         int hairpin_flow;
5692         struct rte_flow_attr attr_tx = { .priority = 0 };
5693         const struct rte_flow_action *actions;
5694         struct rte_flow_action *translated_actions = NULL;
5695         struct mlx5_flow_tunnel *tunnel;
5696         struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5697         struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
5698         struct mlx5_flow_split_info flow_split_info = {
5699                 .external = !!external,
5700                 .skip_scale = 0,
5701                 .flow_idx = 0,
5702                 .prefix_mark = 0,
5703                 .prefix_layers = 0,
5704                 .table_id = 0
5705         };
5706         int ret;
5707
5708         MLX5_ASSERT(wks);
5709         rss_desc = &wks->rss_desc;
5710         ret = flow_action_handles_translate(dev, original_actions,
5711                                             indir_actions,
5712                                             &indir_actions_n,
5713                                             &translated_actions, error);
5714         if (ret < 0) {
5715                 MLX5_ASSERT(translated_actions == NULL);
5716                 return 0;
5717         }
5718         actions = translated_actions ? translated_actions : original_actions;
5719         p_actions_rx = actions;
5720         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5721         ret = flow_drv_validate(dev, attr, items, p_actions_rx,
5722                                 external, hairpin_flow, error);
5723         if (ret < 0)
5724                 goto error_before_hairpin_split;
5725         flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5726         if (!flow) {
5727                 rte_errno = ENOMEM;
5728                 goto error_before_hairpin_split;
5729         }
5730         if (hairpin_flow > 0) {
5731                 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5732                         rte_errno = EINVAL;
5733                         goto error_before_hairpin_split;
5734                 }
5735                 flow_hairpin_split(dev, actions, actions_rx.actions,
5736                                    actions_hairpin_tx.actions, items_tx.items,
5737                                    idx);
5738                 p_actions_rx = actions_rx.actions;
5739         }
5740         flow_split_info.flow_idx = idx;
5741         flow->drv_type = flow_get_drv_type(dev, attr);
5742         MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5743                     flow->drv_type < MLX5_FLOW_TYPE_MAX);
5744         memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5745         /* RSS Action only works on NIC RX domain */
5746         if (attr->ingress && !attr->transfer)
5747                 rss = flow_get_rss_action(p_actions_rx);
5748         if (rss) {
5749                 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5750                         return 0;
5751                 /*
5752                  * The following information is required by
5753                  * mlx5_flow_hashfields_adjust() in advance.
5754                  */
5755                 rss_desc->level = rss->level;
5756                 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5757                 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5758         }
5759         flow->dev_handles = 0;
5760         if (rss && rss->types) {
5761                 unsigned int graph_root;
5762
5763                 graph_root = find_graph_root(items, rss->level);
5764                 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5765                                            items, rss->types,
5766                                            mlx5_support_expansion, graph_root);
5767                 MLX5_ASSERT(ret > 0 &&
5768                        (unsigned int)ret < sizeof(expand_buffer.buffer));
5769         } else {
5770                 buf->entries = 1;
5771                 buf->entry[0].pattern = (void *)(uintptr_t)items;
5772         }
5773         rss_desc->shared_rss = flow_get_shared_rss_action(dev, indir_actions,
5774                                                       indir_actions_n);
5775         for (i = 0; i < buf->entries; ++i) {
5776                 /* Initialize flow split data. */
5777                 flow_split_info.prefix_layers = 0;
5778                 flow_split_info.prefix_mark = 0;
5779                 flow_split_info.skip_scale = 0;
5780                 /*
5781                  * The splitter may create multiple dev_flows,
5782                  * depending on configuration. In the simplest
5783                  * case it just creates unmodified original flow.
5784                  * case it just creates the unmodified original flow.
5785                 ret = flow_create_split_outer(dev, flow, attr,
5786                                               buf->entry[i].pattern,
5787                                               p_actions_rx, &flow_split_info,
5788                                               error);
5789                 if (ret < 0)
5790                         goto error;
5791                 if (is_flow_tunnel_steer_rule(dev, attr,
5792                                               buf->entry[i].pattern,
5793                                               p_actions_rx)) {
5794                         ret = flow_tunnel_add_default_miss(dev, flow, attr,
5795                                                            p_actions_rx,
5796                                                            idx,
5797                                                            &default_miss_ctx,
5798                                                            error);
5799                         if (ret < 0) {
5800                                 mlx5_free(default_miss_ctx.queue);
5801                                 goto error;
5802                         }
5803                 }
5804         }
5805         /* Create the tx flow. */
5806         if (hairpin_flow) {
5807                 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5808                 attr_tx.ingress = 0;
5809                 attr_tx.egress = 1;
5810                 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5811                                          actions_hairpin_tx.actions,
5812                                          idx, error);
5813                 if (!dev_flow)
5814                         goto error;
5815                 dev_flow->flow = flow;
5816                 dev_flow->external = 0;
5817                 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5818                               dev_flow->handle, next);
5819                 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5820                                          items_tx.items,
5821                                          actions_hairpin_tx.actions, error);
5822                 if (ret < 0)
5823                         goto error;
5824         }
5825         /*
5826          * Update the metadata register copy table. If the extensive
5827          * metadata feature is enabled and the registers are supported,
5828          * we might create an extra rte_flow for each unique
5829          * MARK/FLAG action ID.
5830          *
5831          * The table is updated for ingress flows only, because
5832          * egress flows belong to a different device and their
5833          * copy table should be updated in the peer NIC Rx domain.
5834          */
5835         if (attr->ingress &&
5836             (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5837                 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5838                 if (ret)
5839                         goto error;
5840         }
5841         /*
5842          * If the flow is external (from the application), or the device is
5843          * started, or this is an mreg discover flow, apply it immediately.
5844          */
5845         if (external || dev->data->dev_started ||
5846             (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
5847              attr->priority == MLX5_FLOW_LOWEST_PRIO_INDICATOR)) {
5848                 ret = flow_drv_apply(dev, flow, error);
5849                 if (ret < 0)
5850                         goto error;
5851         }
5852         if (list) {
5853                 rte_spinlock_lock(&priv->flow_list_lock);
5854                 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5855                              flow, next);
5856                 rte_spinlock_unlock(&priv->flow_list_lock);
5857         }
5858         flow_rxq_flags_set(dev, flow);
5859         rte_free(translated_actions);
5860         tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5861         if (tunnel) {
5862                 flow->tunnel = 1;
5863                 flow->tunnel_id = tunnel->tunnel_id;
5864                 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5865                 mlx5_free(default_miss_ctx.queue);
5866         }
5867         mlx5_flow_pop_thread_workspace();
5868         return idx;
5869 error:
5870         MLX5_ASSERT(flow);
5871         ret = rte_errno; /* Save rte_errno before cleanup. */
5872         flow_mreg_del_copy_action(dev, flow);
5873         flow_drv_destroy(dev, flow);
5874         if (rss_desc->shared_rss)
5875                 __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
5876                         mlx5_ipool_get
5877                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
5878                         rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
5879         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5880         rte_errno = ret; /* Restore rte_errno. */
5883         mlx5_flow_pop_thread_workspace();
5884 error_before_hairpin_split:
5885         rte_free(translated_actions);
5886         return 0;
5887 }
5888
5889 /**
5890  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5891  * incoming packets to table 1.
5892  *
5893  * Other flow rules, requested for group n, will be created in
5894  * e-switch table n+1.
5895  * A jump action to e-switch group n will be created as a jump to group n+1.
5896  *
5897  * Used when working in switchdev mode, to utilise advantages of table 1
5898  * and above.
5899  *
5900  * @param dev
5901  *   Pointer to Ethernet device.
5902  *
5903  * @return
5904  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
5905  */
5906 struct rte_flow *
5907 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5908 {
5909         const struct rte_flow_attr attr = {
5910                 .group = 0,
5911                 .priority = 0,
5912                 .ingress = 1,
5913                 .egress = 0,
5914                 .transfer = 1,
5915         };
5916         const struct rte_flow_item pattern = {
5917                 .type = RTE_FLOW_ITEM_TYPE_END,
5918         };
5919         struct rte_flow_action_jump jump = {
5920                 .group = 1,
5921         };
5922         const struct rte_flow_action actions[] = {
5923                 {
5924                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
5925                         .conf = &jump,
5926                 },
5927                 {
5928                         .type = RTE_FLOW_ACTION_TYPE_END,
5929                 },
5930         };
5931         struct mlx5_priv *priv = dev->data->dev_private;
5932         struct rte_flow_error error;
5933
5934         return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5935                                                    &attr, &pattern,
5936                                                    actions, false, &error);
5937 }
5938
5939 /**
5940  * Validate a flow supported by the NIC.
5941  *
5942  * @see rte_flow_validate()
5943  * @see rte_flow_ops
5944  */
5945 int
5946 mlx5_flow_validate(struct rte_eth_dev *dev,
5947                    const struct rte_flow_attr *attr,
5948                    const struct rte_flow_item items[],
5949                    const struct rte_flow_action original_actions[],
5950                    struct rte_flow_error *error)
5951 {
5952         int hairpin_flow;
5953         struct mlx5_translated_action_handle
5954                 indir_actions[MLX5_MAX_INDIRECT_ACTIONS];
5955         int indir_actions_n = MLX5_MAX_INDIRECT_ACTIONS;
5956         const struct rte_flow_action *actions;
5957         struct rte_flow_action *translated_actions = NULL;
5958         int ret = flow_action_handles_translate(dev, original_actions,
5959                                                 indir_actions,
5960                                                 &indir_actions_n,
5961                                                 &translated_actions, error);
5962
5963         if (ret)
5964                 return ret;
5965         actions = translated_actions ? translated_actions : original_actions;
5966         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5967         ret = flow_drv_validate(dev, attr, items, actions,
5968                                 true, hairpin_flow, error);
5969         rte_free(translated_actions);
5970         return ret;
5971 }
5972
5973 /**
5974  * Create a flow.
5975  *
5976  * @see rte_flow_create()
5977  * @see rte_flow_ops
5978  */
5979 struct rte_flow *
5980 mlx5_flow_create(struct rte_eth_dev *dev,
5981                  const struct rte_flow_attr *attr,
5982                  const struct rte_flow_item items[],
5983                  const struct rte_flow_action actions[],
5984                  struct rte_flow_error *error)
5985 {
5986         struct mlx5_priv *priv = dev->data->dev_private;
5987
5988         /*
5989          * If the device is not started yet, the application is not allowed
5990          * to create a flow. PMD default flows and traffic control flows
5991          * are not affected.
5992          */
5993         if (unlikely(!dev->data->dev_started)) {
5994                 DRV_LOG(DEBUG, "port %u is not started when "
5995                         "inserting a flow", dev->data->port_id);
5996                 rte_flow_error_set(error, ENODEV,
5997                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5998                                    NULL,
5999                                    "port not started");
6000                 return NULL;
6001         }
6002
6003         return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
6004                                   attr, items, actions, true, error);
6005 }
6006
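/*
 * Illustrative sketch only, never called by the driver: how an
 * application reaches mlx5_flow_create() above through the generic
 * rte_flow API. The port id, pattern and queue index are hypothetical;
 * the device must be started first (see the check above).
 */
static __rte_unused int
flow_create_api_sketch(uint16_t port_id)
{
	static const struct rte_flow_attr attr = { .ingress = 1 };
	static const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	return flow == NULL ? -rte_errno : 0;
}
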
6007 /**
6008  * Destroy a flow in a list.
6009  *
6010  * @param dev
6011  *   Pointer to Ethernet device.
6012  * @param list
6013  *   Pointer to the indexed flow list. If this parameter is NULL,
6014  *   there is no flow removal from the list. Note that since the
6015  *   flow is added to an indexed list, the memory the indexed
6016  *   list points to may change as flows are destroyed.
6017  * @param[in] flow_idx
6018  *   Index of flow to destroy.
6019  */
6020 static void
6021 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
6022                   uint32_t flow_idx)
6023 {
6024         struct mlx5_priv *priv = dev->data->dev_private;
6025         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6026                                                [MLX5_IPOOL_RTE_FLOW], flow_idx);
6027
6028         if (!flow)
6029                 return;
6030         /*
6031          * Update RX queue flags only if port is started, otherwise it is
6032          * already clean.
6033          */
6034         if (dev->data->dev_started)
6035                 flow_rxq_flags_trim(dev, flow);
6036         flow_drv_destroy(dev, flow);
6037         if (list) {
6038                 rte_spinlock_lock(&priv->flow_list_lock);
6039                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
6040                              flow_idx, flow, next);
6041                 rte_spinlock_unlock(&priv->flow_list_lock);
6042         }
6043         if (flow->tunnel) {
6044                 struct mlx5_flow_tunnel *tunnel;
6045
6046                 tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
6047                 RTE_VERIFY(tunnel);
6048                 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
6049                         mlx5_flow_tunnel_free(dev, tunnel);
6050         }
6051         flow_mreg_del_copy_action(dev, flow);
6052         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
6053 }
6054
6055 /**
6056  * Destroy all flows.
6057  *
6058  * @param dev
6059  *   Pointer to Ethernet device.
6060  * @param list
6061  *   Pointer to the Indexed flow list.
6062  * @param active
6063  *   Whether flushing is called actively.
6064  */
6065 void
6066 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
6067 {
6068         uint32_t num_flushed = 0;
6069
6070         while (*list) {
6071                 flow_list_destroy(dev, list, *list);
6072                 num_flushed++;
6073         }
6074         if (active) {
6075                 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
6076                         dev->data->port_id, num_flushed);
6077         }
6078 }
6079
6080 /**
6081  * Stop all default actions for flows.
6082  *
6083  * @param dev
6084  *   Pointer to Ethernet device.
6085  */
6086 void
6087 mlx5_flow_stop_default(struct rte_eth_dev *dev)
6088 {
6089         flow_mreg_del_default_copy_action(dev);
6090         flow_rxq_flags_clear(dev);
6091 }
6092
6093 /**
6094  * Start all default actions for flows.
6095  *
6096  * @param dev
6097  *   Pointer to Ethernet device.
6098  * @return
6099  *   0 on success, a negative errno value otherwise and rte_errno is set.
6100  */
6101 int
6102 mlx5_flow_start_default(struct rte_eth_dev *dev)
6103 {
6104         struct rte_flow_error error;
6105
6106         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
6107         return flow_mreg_add_default_copy_action(dev, &error);
6108 }
6109
6110 /**
6111  * Release thread-specific flow workspace data (key destructor).
6112  */
6113 void
6114 flow_release_workspace(void *data)
6115 {
6116         struct mlx5_flow_workspace *wks = data;
6117         struct mlx5_flow_workspace *next;
6118
6119         while (wks) {
6120                 next = wks->next;
6121                 free(wks->rss_desc.queue);
6122                 free(wks);
6123                 wks = next;
6124         }
6125 }
6126
6127 /**
6128  * Get thread specific current flow workspace.
6129  *
6130  * @return pointer to thread specific flow workspace data, NULL on error.
6131  */
6132 struct mlx5_flow_workspace*
6133 mlx5_flow_get_thread_workspace(void)
6134 {
6135         struct mlx5_flow_workspace *data;
6136
6137         data = mlx5_flow_os_get_specific_workspace();
6138         MLX5_ASSERT(data && data->inuse);
6139         if (!data || !data->inuse)
6140                 DRV_LOG(ERR, "flow workspace not initialized.");
6141         return data;
6142 }
6143
6144 /**
6145  * Allocate and init new flow workspace.
6146  *
6147  * @return pointer to flow workspace data, NULL on error.
6148  */
6149 static struct mlx5_flow_workspace*
6150 flow_alloc_thread_workspace(void)
6151 {
6152         struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
6153
6154         if (!data) {
6155                 DRV_LOG(ERR, "Failed to allocate flow workspace "
6156                         "memory.");
6157                 return NULL;
6158         }
6159         data->rss_desc.queue = calloc(1,
6160                         sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
6161         if (!data->rss_desc.queue)
6162                 goto err;
6163         data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
6164         return data;
6165 err:
6166         if (data->rss_desc.queue)
6167                 free(data->rss_desc.queue);
6168         free(data);
6169         return NULL;
6170 }
6171
6172 /**
6173  * Get new thread specific flow workspace.
6174  *
6175  * If the current workspace is in use, create a new one and set it as current.
6176  *
6177  * @return pointer to thread specific flow workspace data, NULL on error.
6178  */
6179 static struct mlx5_flow_workspace*
6180 mlx5_flow_push_thread_workspace(void)
6181 {
6182         struct mlx5_flow_workspace *curr;
6183         struct mlx5_flow_workspace *data;
6184
6185         curr = mlx5_flow_os_get_specific_workspace();
6186         if (!curr) {
6187                 data = flow_alloc_thread_workspace();
6188                 if (!data)
6189                         return NULL;
6190         } else if (!curr->inuse) {
6191                 data = curr;
6192         } else if (curr->next) {
6193                 data = curr->next;
6194         } else {
6195                 data = flow_alloc_thread_workspace();
6196                 if (!data)
6197                         return NULL;
6198                 curr->next = data;
6199                 data->prev = curr;
6200         }
6201         data->inuse = 1;
6202         data->flow_idx = 0;
6203         /* Set as current workspace */
6204         if (mlx5_flow_os_set_specific_workspace(data))
6205                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6206         return data;
6207 }
6208
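/*
 * Illustrative sketch only, never called by the driver: workspaces form
 * a per-thread stack, so a nested flow creation (e.g. a tunnel default
 * miss rule created while another flow is in progress) gets its own
 * workspace and rss_desc.
 */
static __rte_unused void
flow_workspace_stack_sketch(void)
{
	struct mlx5_flow_workspace *outer = mlx5_flow_push_thread_workspace();
	struct mlx5_flow_workspace *inner = mlx5_flow_push_thread_workspace();

	MLX5_ASSERT(outer && inner && inner != outer);
	mlx5_flow_pop_thread_workspace(); /* "outer" is current again. */
	mlx5_flow_pop_thread_workspace();
	RTE_SET_USED(outer);
	RTE_SET_USED(inner);
}
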
6209 /**
6210  * Close current thread specific flow workspace.
6211  *
6212  * If a previous workspace is available, set it as current.
6215  */
6216 static void
6217 mlx5_flow_pop_thread_workspace(void)
6218 {
6219         struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
6220
6221         if (!data)
6222                 return;
6223         if (!data->inuse) {
6224                 DRV_LOG(ERR, "Failed to close unused flow workspace.");
6225                 return;
6226         }
6227         data->inuse = 0;
6228         if (!data->prev)
6229                 return;
6230         if (mlx5_flow_os_set_specific_workspace(data->prev))
6231                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6232 }
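
/*
 * Editor's sketch (not part of the driver): the intended pairing of the
 * push/pop helpers above around one unit of flow-creation work. Nested
 * calls on the same thread reuse the linked workspace list instead of
 * reallocating. The function name is illustrative only.
 */
static inline int
example_flow_work(void)
{
        struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();

        if (!wks)
                return -ENOMEM;
        /* ... translate/apply device flows using wks->rss_desc ... */
        mlx5_flow_pop_thread_workspace();
        return 0;
}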
6233
6234 /**
6235  * Verify the flow list is empty.
6236  *
6237  * @param dev
6238  *   Pointer to Ethernet device.
6239  *
6240  * @return the number of flows not released.
6241  */
6242 int
6243 mlx5_flow_verify(struct rte_eth_dev *dev)
6244 {
6245         struct mlx5_priv *priv = dev->data->dev_private;
6246         struct rte_flow *flow;
6247         uint32_t idx;
6248         int ret = 0;
6249
6250         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6251                       flow, next) {
6252                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
6253                         dev->data->port_id, (void *)flow);
6254                 ++ret;
6255         }
6256         return ret;
6257 }
6258
6259 /**
6260  * Enable default hairpin egress flow.
6261  *
6262  * @param dev
6263  *   Pointer to Ethernet device.
6264  * @param queue
6265  *   The queue index.
6266  *
6267  * @return
6268  *   0 on success, a negative errno value otherwise and rte_errno is set.
6269  */
6270 int
6271 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6272                             uint32_t queue)
6273 {
6274         struct mlx5_priv *priv = dev->data->dev_private;
6275         const struct rte_flow_attr attr = {
6276                 .egress = 1,
6277                 .priority = 0,
6278         };
6279         struct mlx5_rte_flow_item_tx_queue queue_spec = {
6280                 .queue = queue,
6281         };
6282         struct mlx5_rte_flow_item_tx_queue queue_mask = {
6283                 .queue = UINT32_MAX,
6284         };
6285         struct rte_flow_item items[] = {
6286                 {
6287                         .type = (enum rte_flow_item_type)
6288                                 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6289                         .spec = &queue_spec,
6290                         .last = NULL,
6291                         .mask = &queue_mask,
6292                 },
6293                 {
6294                         .type = RTE_FLOW_ITEM_TYPE_END,
6295                 },
6296         };
6297         struct rte_flow_action_jump jump = {
6298                 .group = MLX5_HAIRPIN_TX_TABLE,
6299         };
6300         struct rte_flow_action actions[2];
6301         uint32_t flow_idx;
6302         struct rte_flow_error error;
6303
6304         actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6305         actions[0].conf = &jump;
6306         actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6307         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6308                                 &attr, items, actions, false, &error);
6309         if (!flow_idx) {
6310                 DRV_LOG(DEBUG,
6311                         "Failed to create ctrl flow: rte_errno(%d),"
6312                         " type(%d), message(%s)",
6313                         rte_errno, error.type,
6314                         error.message ? error.message : " (no stated reason)");
6315                 return -rte_errno;
6316         }
6317         return 0;
6318 }
6319
6320 /**
6321  * Enable a control flow configured from the control plane.
6322  *
6323  * @param dev
6324  *   Pointer to Ethernet device.
6325  * @param eth_spec
6326  *   An Ethernet flow spec to apply.
6327  * @param eth_mask
6328  *   An Ethernet flow mask to apply.
6329  * @param vlan_spec
6330  *   A VLAN flow spec to apply.
6331  * @param vlan_mask
6332  *   A VLAN flow mask to apply.
6333  *
6334  * @return
6335  *   0 on success, a negative errno value otherwise and rte_errno is set.
6336  */
6337 int
6338 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6339                     struct rte_flow_item_eth *eth_spec,
6340                     struct rte_flow_item_eth *eth_mask,
6341                     struct rte_flow_item_vlan *vlan_spec,
6342                     struct rte_flow_item_vlan *vlan_mask)
6343 {
6344         struct mlx5_priv *priv = dev->data->dev_private;
6345         const struct rte_flow_attr attr = {
6346                 .ingress = 1,
6347                 .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
6348         };
6349         struct rte_flow_item items[] = {
6350                 {
6351                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6352                         .spec = eth_spec,
6353                         .last = NULL,
6354                         .mask = eth_mask,
6355                 },
6356                 {
6357                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6358                                               RTE_FLOW_ITEM_TYPE_END,
6359                         .spec = vlan_spec,
6360                         .last = NULL,
6361                         .mask = vlan_mask,
6362                 },
6363                 {
6364                         .type = RTE_FLOW_ITEM_TYPE_END,
6365                 },
6366         };
6367         uint16_t queue[priv->reta_idx_n];
6368         struct rte_flow_action_rss action_rss = {
6369                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6370                 .level = 0,
6371                 .types = priv->rss_conf.rss_hf,
6372                 .key_len = priv->rss_conf.rss_key_len,
6373                 .queue_num = priv->reta_idx_n,
6374                 .key = priv->rss_conf.rss_key,
6375                 .queue = queue,
6376         };
6377         struct rte_flow_action actions[] = {
6378                 {
6379                         .type = RTE_FLOW_ACTION_TYPE_RSS,
6380                         .conf = &action_rss,
6381                 },
6382                 {
6383                         .type = RTE_FLOW_ACTION_TYPE_END,
6384                 },
6385         };
6386         uint32_t flow_idx;
6387         struct rte_flow_error error;
6388         unsigned int i;
6389
6390         if (!priv->reta_idx_n || !priv->rxqs_n)
6391                 return 0;
6393         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6394                 action_rss.types = 0;
6395         for (i = 0; i != priv->reta_idx_n; ++i)
6396                 queue[i] = (*priv->reta_idx)[i];
6397         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6398                                 &attr, items, actions, false, &error);
6399         if (!flow_idx)
6400                 return -rte_errno;
6401         return 0;
6402 }
6403
6404 /**
6405  * Enable a control flow configured from the control plane.
6406  *
6407  * @param dev
6408  *   Pointer to Ethernet device.
6409  * @param eth_spec
6410  *   An Ethernet flow spec to apply.
6411  * @param eth_mask
6412  *   An Ethernet flow mask to apply.
6413  *
6414  * @return
6415  *   0 on success, a negative errno value otherwise and rte_errno is set.
6416  */
6417 int
6418 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6419                struct rte_flow_item_eth *eth_spec,
6420                struct rte_flow_item_eth *eth_mask)
6421 {
6422         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6423 }
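
/*
 * Editor's sketch (assumption, not driver code): how a traffic-enable
 * path could use mlx5_ctrl_flow() above to install a unicast MAC control
 * flow. Only the destination MAC is matched; the helper name is
 * illustrative.
 */
static inline int
example_enable_mac_flow(struct rte_eth_dev *dev,
                        const struct rte_ether_addr *mac)
{
        struct rte_flow_item_eth spec = { .type = 0 };
        struct rte_flow_item_eth mask = { .type = 0 };

        spec.dst = *mac;
        /* Match the full 48-bit destination address. */
        memset(&mask.dst, 0xff, sizeof(mask.dst));
        return mlx5_ctrl_flow(dev, &spec, &mask);
}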
6424
6425 /**
6426  * Create a default miss flow rule matching LACP traffic.
6427  *
6428  * @param dev
6429  *   Pointer to Ethernet device.
6430  *
6433  * @return
6434  *   0 on success, a negative errno value otherwise and rte_errno is set.
6435  */
6436 int
6437 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6438 {
6439         struct mlx5_priv *priv = dev->data->dev_private;
6440         /*
6441          * The LACP matching is done using only the ether type, since a
6442          * multicast destination MAC makes the kernel deprioritize this flow.
6443          */
6444         static const struct rte_flow_item_eth lacp_spec = {
6445                 .type = RTE_BE16(0x8809),
6446         };
6447         static const struct rte_flow_item_eth lacp_mask = {
6448                 .type = RTE_BE16(0xffff),
6449         };
6450         const struct rte_flow_attr attr = {
6451                 .ingress = 1,
6452         };
6453         struct rte_flow_item items[] = {
6454                 {
6455                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6456                         .spec = &lacp_spec,
6457                         .mask = &lacp_mask,
6458                 },
6459                 {
6460                         .type = RTE_FLOW_ITEM_TYPE_END,
6461                 },
6462         };
6463         struct rte_flow_action actions[] = {
6464                 {
6465                         .type = (enum rte_flow_action_type)
6466                                 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6467                 },
6468                 {
6469                         .type = RTE_FLOW_ACTION_TYPE_END,
6470                 },
6471         };
6472         struct rte_flow_error error;
6473         uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6474                                 &attr, items, actions, false, &error);
6475
6476         if (!flow_idx)
6477                 return -rte_errno;
6478         return 0;
6479 }
6480
6481 /**
6482  * Destroy a flow.
6483  *
6484  * @see rte_flow_destroy()
6485  * @see rte_flow_ops
6486  */
6487 int
6488 mlx5_flow_destroy(struct rte_eth_dev *dev,
6489                   struct rte_flow *flow,
6490                   struct rte_flow_error *error __rte_unused)
6491 {
6492         struct mlx5_priv *priv = dev->data->dev_private;
6493
6494         flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6495         return 0;
6496 }
6497
6498 /**
6499  * Destroy all flows.
6500  *
6501  * @see rte_flow_flush()
6502  * @see rte_flow_ops
6503  */
6504 int
6505 mlx5_flow_flush(struct rte_eth_dev *dev,
6506                 struct rte_flow_error *error __rte_unused)
6507 {
6508         struct mlx5_priv *priv = dev->data->dev_private;
6509
6510         mlx5_flow_list_flush(dev, &priv->flows, false);
6511         return 0;
6512 }
6513
6514 /**
6515  * Isolated mode.
6516  *
6517  * @see rte_flow_isolate()
6518  * @see rte_flow_ops
6519  */
6520 int
6521 mlx5_flow_isolate(struct rte_eth_dev *dev,
6522                   int enable,
6523                   struct rte_flow_error *error)
6524 {
6525         struct mlx5_priv *priv = dev->data->dev_private;
6526
6527         if (dev->data->dev_started) {
6528                 rte_flow_error_set(error, EBUSY,
6529                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6530                                    NULL,
6531                                    "port must be stopped first");
6532                 return -rte_errno;
6533         }
6534         priv->isolated = !!enable;
6535         if (enable)
6536                 dev->dev_ops = &mlx5_dev_ops_isolate;
6537         else
6538                 dev->dev_ops = &mlx5_dev_ops;
6539
6540         dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6541         dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6542
6543         return 0;
6544 }
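
/*
 * Editor's sketch (application side): isolated mode must be toggled
 * while the port is stopped, matching the EBUSY check above. The helper
 * name is illustrative.
 */
static inline int
example_set_isolated(uint16_t port_id, int enable)
{
        struct rte_flow_error error;
        int ret;

        ret = rte_eth_dev_stop(port_id);
        if (ret)
                return ret;
        ret = rte_flow_isolate(port_id, enable, &error);
        if (ret)
                return ret;
        return rte_eth_dev_start(port_id);
}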
6545
6546 /**
6547  * Query a flow.
6548  *
6549  * @see rte_flow_query()
6550  * @see rte_flow_ops
6551  */
6552 static int
6553 flow_drv_query(struct rte_eth_dev *dev,
6554                uint32_t flow_idx,
6555                const struct rte_flow_action *actions,
6556                void *data,
6557                struct rte_flow_error *error)
6558 {
6559         struct mlx5_priv *priv = dev->data->dev_private;
6560         const struct mlx5_flow_driver_ops *fops;
6561         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6562                                                [MLX5_IPOOL_RTE_FLOW],
6563                                                flow_idx);
6564         enum mlx5_flow_drv_type ftype;
6565
6566         if (!flow) {
6567                 return rte_flow_error_set(error, ENOENT,
6568                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6569                           NULL,
6570                           "invalid flow handle");
6571         }
6572         ftype = flow->drv_type;
6573         MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6574         fops = flow_get_drv_ops(ftype);
6575
6576         return fops->query(dev, flow, actions, data, error);
6577 }
6578
6579 /**
6580  * Query a flow.
6581  *
6582  * @see rte_flow_query()
6583  * @see rte_flow_ops
6584  */
6585 int
6586 mlx5_flow_query(struct rte_eth_dev *dev,
6587                 struct rte_flow *flow,
6588                 const struct rte_flow_action *actions,
6589                 void *data,
6590                 struct rte_flow_error *error)
6591 {
6592         int ret;
6593
6594         ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6595                              error);
6596         if (ret < 0)
6597                 return ret;
6598         return 0;
6599 }
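
/*
 * Editor's sketch (application side): querying a COUNT action through
 * the rte_flow_query() entry point that lands in the wrapper above. The
 * mlx5 query callback walks the action list up to END, so an
 * END-terminated array is passed. The helper name is illustrative.
 */
static inline int
example_query_count(uint16_t port_id, struct rte_flow *flow,
                    struct rte_flow_query_count *stats)
{
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        memset(stats, 0, sizeof(*stats));
        return rte_flow_query(port_id, flow, actions, stats, &error);
}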
6600
6601 /**
6602  * Get rte_flow callbacks.
6603  *
6604  * @param dev
6605  *   Pointer to Ethernet device structure.
6606  * @param ops
6607  *   Pointer to operation-specific structure.
6608  *
6609  * @return 0
6610  */
6611 int
6612 mlx5_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
6613                   const struct rte_flow_ops **ops)
6614 {
6615         *ops = &mlx5_flow_ops;
6616         return 0;
6617 }
6618
6619 /**
6620  * Create the needed meter and suffix tables.
6621  *
6622  * @param[in] dev
6623  *   Pointer to Ethernet device.
6624  *
6625  * @return
6626  *   Pointer to table set on success, NULL otherwise.
6627  */
6628 struct mlx5_meter_domains_infos *
6629 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev)
6630 {
6631         const struct mlx5_flow_driver_ops *fops;
6632
6633         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6634         return fops->create_mtr_tbls(dev);
6635 }
6636
6637 /**
6638  * Destroy the meter table set.
6639  *
6640  * @param[in] dev
6641  *   Pointer to Ethernet device.
6642  * @param[in] tbl
6643  *   Pointer to the meter table set.
6644  *
6645  * @return
6646  *   0 on success.
6647  */
6648 int
6649 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
6650                            struct mlx5_meter_domains_infos *tbls)
6651 {
6652         const struct mlx5_flow_driver_ops *fops;
6653
6654         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6655         return fops->destroy_mtr_tbls(dev, tbls);
6656 }
6657
6658 /**
6659  * Allocate the needed ASO flow meter id.
6660  *
6661  * @param[in] dev
6662  *   Pointer to Ethernet device.
6663  *
6664  * @return
6665  *   Index to the ASO flow meter on success, 0 otherwise.
6666  */
6667 uint32_t
6668 mlx5_flow_mtr_alloc(struct rte_eth_dev *dev)
6669 {
6670         const struct mlx5_flow_driver_ops *fops;
6671
6672         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6673         return fops->create_meter(dev);
6674 }
6675
6676 /**
6677  * Free the ASO flow meter id.
6678  *
6679  * @param[in] dev
6680  *   Pointer to Ethernet device.
6681  * @param[in] mtr_idx
6682  *   Index to the ASO flow meter to be freed.
6686  */
6687 void
6688 mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx)
6689 {
6690         const struct mlx5_flow_driver_ops *fops;
6691
6692         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6693         fops->free_meter(dev, mtr_idx);
6694 }
6695
6696 /**
6697  * Allocate a counter.
6698  *
6699  * @param[in] dev
6700  *   Pointer to Ethernet device structure.
6701  *
6702  * @return
6703  *   Index to the allocated counter on success, 0 otherwise.
6704  */
6705 uint32_t
6706 mlx5_counter_alloc(struct rte_eth_dev *dev)
6707 {
6708         const struct mlx5_flow_driver_ops *fops;
6709         struct rte_flow_attr attr = { .transfer = 0 };
6710
6711         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6712                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6713                 return fops->counter_alloc(dev);
6714         }
6715         DRV_LOG(ERR,
6716                 "port %u counter allocate is not supported.",
6717                  dev->data->port_id);
6718         return 0;
6719 }
6720
6721 /**
6722  * Free a counter.
6723  *
6724  * @param[in] dev
6725  *   Pointer to Ethernet device structure.
6726  * @param[in] cnt
6727  *   Index to the counter to be freed.
6728  */
6729 void
6730 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
6731 {
6732         const struct mlx5_flow_driver_ops *fops;
6733         struct rte_flow_attr attr = { .transfer = 0 };
6734
6735         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6736                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6737                 fops->counter_free(dev, cnt);
6738                 return;
6739         }
6740         DRV_LOG(ERR,
6741                 "port %u counter free is not supported.",
6742                  dev->data->port_id);
6743 }
6744
6745 /**
6746  * Query counter statistics.
6747  *
6748  * @param[in] dev
6749  *   Pointer to Ethernet device structure.
6750  * @param[in] cnt
6751  *   Index to counter to query.
6752  * @param[in] clear
6753  *   Set to clear counter statistics.
6754  * @param[out] pkts
6755  *   Where to store the counter packet hits.
6756  * @param[out] bytes
6757  *   Where to store the counter byte hits.
6758  *
6759  * @return
6760  *   0 on success, a negative errno value otherwise.
6761  */
6762 int
6763 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
6764                    bool clear, uint64_t *pkts, uint64_t *bytes)
6765 {
6766         const struct mlx5_flow_driver_ops *fops;
6767         struct rte_flow_attr attr = { .transfer = 0 };
6768
6769         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6770                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6771                 return fops->counter_query(dev, cnt, clear, pkts, bytes);
6772         }
6773         DRV_LOG(ERR,
6774                 "port %u counter query is not supported.",
6775                  dev->data->port_id);
6776         return -ENOTSUP;
6777 }
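
/*
 * Editor's sketch: the allocate/query/free life cycle of the counter
 * helpers above, usable only when the DV driver type is selected. The
 * helper name is illustrative.
 */
static inline int
example_counter_roundtrip(struct rte_eth_dev *dev)
{
        uint64_t pkts = 0, bytes = 0;
        uint32_t cnt = mlx5_counter_alloc(dev);
        int ret;

        if (!cnt)
                return -ENOTSUP; /* Allocation failed or unsupported. */
        ret = mlx5_counter_query(dev, cnt, false, &pkts, &bytes);
        mlx5_counter_free(dev, cnt);
        return ret;
}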
6778
6779 /**
6780  * Allocate new memory for the counter values, wrapped with all the
6781  * needed management structures.
6782  *
6783  * @param[in] sh
6784  *   Pointer to mlx5_dev_ctx_shared object.
6785  *
6786  * @return
6787  *   0 on success, a negative errno value otherwise.
6788  */
6789 static int
6790 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
6791 {
6792         struct mlx5_devx_mkey_attr mkey_attr;
6793         struct mlx5_counter_stats_mem_mng *mem_mng;
6794         volatile struct flow_counter_stats *raw_data;
6795         int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
6796         int size = (sizeof(struct flow_counter_stats) *
6797                         MLX5_COUNTERS_PER_POOL +
6798                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
6799                         sizeof(struct mlx5_counter_stats_mem_mng);
6800         size_t pgsize = rte_mem_page_size();
6801         uint8_t *mem;
6802         int i;
6803
6804         if (pgsize == (size_t)-1) {
6805                 DRV_LOG(ERR, "Failed to get mem page size");
6806                 rte_errno = ENOMEM;
6807                 return -ENOMEM;
6808         }
6809         mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
6810         if (!mem) {
6811                 rte_errno = ENOMEM;
6812                 return -ENOMEM;
6813         }
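        /*
         * Layout of the single allocation (editor's note):
         *   [raws_n * MLX5_COUNTERS_PER_POOL counter stats records]
         *   [raws_n struct mlx5_counter_stats_raw descriptors]
         *   [struct mlx5_counter_stats_mem_mng] - carved from the tail.
         */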
6814         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
6815         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
6816         mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
6817                                                  IBV_ACCESS_LOCAL_WRITE);
6818         if (!mem_mng->umem) {
6819                 rte_errno = errno;
6820                 mlx5_free(mem);
6821                 return -rte_errno;
6822         }
6823         mkey_attr.addr = (uintptr_t)mem;
6824         mkey_attr.size = size;
6825         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
6826         mkey_attr.pd = sh->pdn;
6827         mkey_attr.log_entity_size = 0;
6828         mkey_attr.pg_access = 0;
6829         mkey_attr.klm_array = NULL;
6830         mkey_attr.klm_num = 0;
6831         mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
6832         mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
6833         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
6834         if (!mem_mng->dm) {
6835                 mlx5_os_umem_dereg(mem_mng->umem);
6836                 rte_errno = errno;
6837                 mlx5_free(mem);
6838                 return -rte_errno;
6839         }
6840         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
6841         raw_data = (volatile struct flow_counter_stats *)mem;
6842         for (i = 0; i < raws_n; ++i) {
6843                 mem_mng->raws[i].mem_mng = mem_mng;
6844                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
6845         }
6846         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
6847                 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
6848                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
6849                                  next);
6850         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
6851         sh->cmng.mem_mng = mem_mng;
6852         return 0;
6853 }
6854
6855 /**
6856  * Set the statistic memory to the new counter pool.
6857  *
6858  * @param[in] sh
6859  *   Pointer to mlx5_dev_ctx_shared object.
6860  * @param[in] pool
6861  *   Pointer to the pool to set the statistic memory.
6862  *
6863  * @return
6864  *   0 on success, a negative errno value otherwise.
6865  */
6866 static int
6867 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
6868                                struct mlx5_flow_counter_pool *pool)
6869 {
6870         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6871         /* Resize the statistic memory when it is used up. */
6872         if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
6873             mlx5_flow_create_counter_stat_mem_mng(sh)) {
6874                 DRV_LOG(ERR, "Cannot resize counter stat mem.");
6875                 return -1;
6876         }
6877         rte_spinlock_lock(&pool->sl);
6878         pool->raw = cmng->mem_mng->raws + pool->index %
6879                     MLX5_CNT_CONTAINER_RESIZE;
6880         rte_spinlock_unlock(&pool->sl);
6881         pool->raw_hw = NULL;
6882         return 0;
6883 }
6884
6885 #define MLX5_POOL_QUERY_FREQ_US 1000000
6886
6887 /**
6888  * Set the periodic procedure for triggering asynchronous batch queries for all
6889  * the counter pools.
6890  *
6891  * @param[in] sh
6892  *   Pointer to mlx5_dev_ctx_shared object.
6893  */
6894 void
6895 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
6896 {
6897         uint32_t pools_n, us;
6898
6899         pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
6900         us = MLX5_POOL_QUERY_FREQ_US / pools_n;
6901         DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
6902         if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
6903                 sh->cmng.query_thread_on = 0;
6904                 DRV_LOG(ERR, "Cannot reinitialize query alarm");
6905         } else {
6906                 sh->cmng.query_thread_on = 1;
6907         }
6908 }
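
/*
 * Editor's note: the pacing above queries each pool roughly once per
 * MLX5_POOL_QUERY_FREQ_US. E.g. with 4 valid pools the alarm fires every
 * 250000 us and each firing advances pool_index by one, so a full sweep
 * of all pools completes in about one second.
 */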
6909
6910 /**
6911  * The periodic procedure for triggering asynchronous batch queries for all the
6912  * counter pools. This function is expected to run in the host thread.
6913  *
6914  * @param[in] arg
6915  *   The parameter for the alarm process.
6916  */
6917 void
6918 mlx5_flow_query_alarm(void *arg)
6919 {
6920         struct mlx5_dev_ctx_shared *sh = arg;
6921         int ret;
6922         uint16_t pool_index = sh->cmng.pool_index;
6923         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6924         struct mlx5_flow_counter_pool *pool;
6925         uint16_t n_valid;
6926
6927         if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
6928                 goto set_alarm;
6929         rte_spinlock_lock(&cmng->pool_update_sl);
6930         pool = cmng->pools[pool_index];
6931         n_valid = cmng->n_valid;
6932         rte_spinlock_unlock(&cmng->pool_update_sl);
6933         /* Set the statistic memory to the newly created pool. */
6934         if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
6935                 goto set_alarm;
6936         if (pool->raw_hw)
6937                 /* There is a pool query in progress. */
6938                 goto set_alarm;
6939         pool->raw_hw =
6940                 LIST_FIRST(&sh->cmng.free_stat_raws);
6941         if (!pool->raw_hw)
6942                 /* No free counter statistics raw memory. */
6943                 goto set_alarm;
6944         /*
6945          * Identify the counters released between query trigger and query
6946          * handle more efficiently. A counter released in this gap period
6947          * should wait for a new round of query, as the newly arrived
6948          * packets would not be taken into account.
6949          */
6950         pool->query_gen++;
6951         ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
6952                                                MLX5_COUNTERS_PER_POOL,
6953                                                NULL, NULL,
6954                                                pool->raw_hw->mem_mng->dm->id,
6955                                                (void *)(uintptr_t)
6956                                                pool->raw_hw->data,
6957                                                sh->devx_comp,
6958                                                (uint64_t)(uintptr_t)pool);
6959         if (ret) {
6960                 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
6961                         " %d", pool->min_dcs->id);
6962                 pool->raw_hw = NULL;
6963                 goto set_alarm;
6964         }
6965         LIST_REMOVE(pool->raw_hw, next);
6966         sh->cmng.pending_queries++;
6967         pool_index++;
6968         if (pool_index >= n_valid)
6969                 pool_index = 0;
6970 set_alarm:
6971         sh->cmng.pool_index = pool_index;
6972         mlx5_set_query_alarm(sh);
6973 }
6974
6975 /**
6976  * Check for newly aged flows in the counter pool and trigger the event.
6977  *
6978  * @param[in] sh
6979  *   Pointer to mlx5_dev_ctx_shared object.
6980  * @param[in] pool
6981  *   Pointer to Current counter pool.
6982  */
6983 static void
6984 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
6985                    struct mlx5_flow_counter_pool *pool)
6986 {
6987         struct mlx5_priv *priv;
6988         struct mlx5_flow_counter *cnt;
6989         struct mlx5_age_info *age_info;
6990         struct mlx5_age_param *age_param;
6991         struct mlx5_counter_stats_raw *cur = pool->raw_hw;
6992         struct mlx5_counter_stats_raw *prev = pool->raw;
6993         const uint64_t curr_time = MLX5_CURR_TIME_SEC;
6994         const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
6995         uint16_t expected = AGE_CANDIDATE;
6996         uint32_t i;
6997
6998         pool->time_of_last_age_check = curr_time;
6999         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
7000                 cnt = MLX5_POOL_GET_CNT(pool, i);
7001                 age_param = MLX5_CNT_TO_AGE(cnt);
7002                 if (__atomic_load_n(&age_param->state,
7003                                     __ATOMIC_RELAXED) != AGE_CANDIDATE)
7004                         continue;
7005                 if (cur->data[i].hits != prev->data[i].hits) {
7006                         __atomic_store_n(&age_param->sec_since_last_hit, 0,
7007                                          __ATOMIC_RELAXED);
7008                         continue;
7009                 }
7010                 if (__atomic_add_fetch(&age_param->sec_since_last_hit,
7011                                        time_delta,
7012                                        __ATOMIC_RELAXED) <= age_param->timeout)
7013                         continue;
7014                 /*
7015                  * Hold the lock first: if the release happened
7016                  * between setting the AGE_TMOUT state and the
7017                  * tailq operation, the release procedure could
7018                  * delete a non-existent tailq node.
7019                  */
7020                 priv = rte_eth_devices[age_param->port_id].data->dev_private;
7021                 age_info = GET_PORT_AGE_INFO(priv);
7022                 rte_spinlock_lock(&age_info->aged_sl);
7023                 if (__atomic_compare_exchange_n(&age_param->state, &expected,
7024                                                 AGE_TMOUT, false,
7025                                                 __ATOMIC_RELAXED,
7026                                                 __ATOMIC_RELAXED)) {
7027                         TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
7028                         MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
7029                 }
7030                 rte_spinlock_unlock(&age_info->aged_sl);
7031         }
7032         mlx5_age_event_prepare(sh);
7033 }
7034
7035 /**
7036  * Handler for the HW response carrying ready values from an asynchronous
7037  * batch query. This function is expected to run in the host thread.
7038  *
7039  * @param[in] sh
7040  *   The pointer to the shared device context.
7041  * @param[in] async_id
7042  *   The DevX async ID.
7043  * @param[in] status
7044  *   The status of the completion.
7045  */
7046 void
7047 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
7048                                   uint64_t async_id, int status)
7049 {
7050         struct mlx5_flow_counter_pool *pool =
7051                 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
7052         struct mlx5_counter_stats_raw *raw_to_free;
7053         uint8_t query_gen = pool->query_gen ^ 1;
7054         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7055         enum mlx5_counter_type cnt_type =
7056                 pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
7057                                 MLX5_COUNTER_TYPE_ORIGIN;
7058
7059         if (unlikely(status)) {
7060                 raw_to_free = pool->raw_hw;
7061         } else {
7062                 raw_to_free = pool->raw;
7063                 if (pool->is_aged)
7064                         mlx5_flow_aging_check(sh, pool);
7065                 rte_spinlock_lock(&pool->sl);
7066                 pool->raw = pool->raw_hw;
7067                 rte_spinlock_unlock(&pool->sl);
7068                 /* Be sure the new raw counters data is updated in memory. */
7069                 rte_io_wmb();
7070                 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
7071                         rte_spinlock_lock(&cmng->csl[cnt_type]);
7072                         TAILQ_CONCAT(&cmng->counters[cnt_type],
7073                                      &pool->counters[query_gen], next);
7074                         rte_spinlock_unlock(&cmng->csl[cnt_type]);
7075                 }
7076         }
7077         LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
7078         pool->raw_hw = NULL;
7079         sh->cmng.pending_queries--;
7080 }
7081
7082 static int
7083 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
7084                     const struct flow_grp_info *grp_info,
7085                     struct rte_flow_error *error)
7086 {
7087         if (grp_info->transfer && grp_info->external &&
7088             grp_info->fdb_def_rule) {
7089                 if (group == UINT32_MAX)
7090                         return rte_flow_error_set
7091                                                 (error, EINVAL,
7092                                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7093                                                  NULL,
7094                                                  "group index not supported");
7095                 *table = group + 1;
7096         } else {
7097                 *table = group;
7098         }
7099         DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
7100         return 0;
7101 }
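
/*
 * Editor's example: with transfer, external and fdb_def_rule all set,
 * group 0 maps to table 1 and group 5 to table 6, keeping FDB table 0
 * for the default rule; otherwise the group index is used as the table
 * id unchanged.
 */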
7102
7103 /**
7104  * Translate the rte_flow group index to HW table value.
7105  *
7106  * If tunnel offload is disabled, all group ids are converted to flow
7107  * table ids using the standard method.
7108  * If tunnel offload is enabled, group id can be converted using the
7109  * standard or tunnel conversion method. Group conversion method
7110  * selection depends on flags in `grp_info` parameter:
7111  * - Internal (grp_info.external == 0) groups conversion uses the
7112  *   standard method.
7113  * - Group ids in the JUMP action are converted with the tunnel method.
7114  * - Group id in rule attribute conversion depends on a rule type and
7115  *   group id value:
7116  *   ** a non-zero group attribute is converted with the tunnel method
7117  *   ** a zero group attribute in a non-tunnel rule is converted using
7118  *      the standard method - there's only one root table
7119  *   ** a zero group attribute in a steer tunnel rule is converted with
7120  *      the standard method - single root table
7121  *   ** zero group attribute in match tunnel rule is a special OvS
7122  *      case: that value is used for portability reasons. That group
7123  *      id is converted with the tunnel conversion method.
7124  *
7125  * @param[in] dev
7126  *   Port device
7127  * @param[in] tunnel
7128  *   PMD tunnel offload object
7129  * @param[in] group
7130  *   rte_flow group index value.
7131  * @param[out] table
7132  *   HW table value.
7133  * @param[in] grp_info
7134  *   Flags used for conversion.
7135  * @param[out] error
7136  *   Pointer to error structure.
7137  *
7138  * @return
7139  *   0 on success, a negative errno value otherwise and rte_errno is set.
7140  */
7141 int
7142 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
7143                          const struct mlx5_flow_tunnel *tunnel,
7144                          uint32_t group, uint32_t *table,
7145                          const struct flow_grp_info *grp_info,
7146                          struct rte_flow_error *error)
7147 {
7148         int ret;
7149         bool standard_translation;
7150
7151         if (!grp_info->skip_scale && grp_info->external &&
7152             group < MLX5_MAX_TABLES_EXTERNAL)
7153                 group *= MLX5_FLOW_TABLE_FACTOR;
7154         if (is_tunnel_offload_active(dev)) {
7155                 standard_translation = !grp_info->external ||
7156                                         grp_info->std_tbl_fix;
7157         } else {
7158                 standard_translation = true;
7159         }
7160         DRV_LOG(DEBUG,
7161                 "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
7162                 dev->data->port_id, group, grp_info->transfer,
7163                 grp_info->external, grp_info->fdb_def_rule,
7164                 standard_translation ? "STANDARD" : "TUNNEL");
7165         if (standard_translation)
7166                 ret = flow_group_to_table(dev->data->port_id, group, table,
7167                                           grp_info, error);
7168         else
7169                 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
7170                                                       table, error);
7171
7172         return ret;
7173 }
7174
7175 /**
7176  * Discover availability of metadata reg_c's.
7177  *
7178  * Iteratively use test flows to check availability.
7179  *
7180  * @param[in] dev
7181  *   Pointer to the Ethernet device structure.
7182  *
7183  * @return
7184  *   0 on success, a negative errno value otherwise and rte_errno is set.
7185  */
7186 int
7187 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
7188 {
7189         struct mlx5_priv *priv = dev->data->dev_private;
7190         struct mlx5_dev_config *config = &priv->config;
7191         enum modify_reg idx;
7192         int n = 0;
7193
7194         /* reg_c[0] and reg_c[1] are reserved. */
7195         config->flow_mreg_c[n++] = REG_C_0;
7196         config->flow_mreg_c[n++] = REG_C_1;
7197         /* Discover availability of other reg_c's. */
7198         for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7199                 struct rte_flow_attr attr = {
7200                         .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7201                         .priority = MLX5_FLOW_LOWEST_PRIO_INDICATOR,
7202                         .ingress = 1,
7203                 };
7204                 struct rte_flow_item items[] = {
7205                         [0] = {
7206                                 .type = RTE_FLOW_ITEM_TYPE_END,
7207                         },
7208                 };
7209                 struct rte_flow_action actions[] = {
7210                         [0] = {
7211                                 .type = (enum rte_flow_action_type)
7212                                         MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7213                                 .conf = &(struct mlx5_flow_action_copy_mreg){
7214                                         .src = REG_C_1,
7215                                         .dst = idx,
7216                                 },
7217                         },
7218                         [1] = {
7219                                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
7220                                 .conf = &(struct rte_flow_action_jump){
7221                                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7222                                 },
7223                         },
7224                         [2] = {
7225                                 .type = RTE_FLOW_ACTION_TYPE_END,
7226                         },
7227                 };
7228                 uint32_t flow_idx;
7229                 struct rte_flow *flow;
7230                 struct rte_flow_error error;
7231
7232                 if (!config->dv_flow_en)
7233                         break;
7234                 /* Create internal flow, validation skips copy action. */
7235                 flow_idx = flow_list_create(dev, NULL, &attr, items,
7236                                             actions, false, &error);
7237                 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7238                                       flow_idx);
7239                 if (!flow)
7240                         continue;
7241                 config->flow_mreg_c[n++] = idx;
7242                 flow_list_destroy(dev, NULL, flow_idx);
7243         }
7244         for (; n < MLX5_MREG_C_NUM; ++n)
7245                 config->flow_mreg_c[n] = REG_NON;
7246         return 0;
7247 }
7248
7249 /**
7250  * Dump flow raw HW data to a file (all flows if flow_idx is NULL).
7251  *
7252  * @param[in] dev
7253  *   The pointer to Ethernet device.
7254  * @param[in] file
7255  *   A pointer to a file for output.
7256  * @param[out] error
7257  *   Perform verbose error reporting if not NULL. PMDs initialize this
7258  *   structure in case of error only.
7259  * @return
7260  *   0 on success, a negative value otherwise.
7261  */
7262 int
7263 mlx5_flow_dev_dump(struct rte_eth_dev *dev, struct rte_flow *flow_idx,
7264                    FILE *file,
7265                    struct rte_flow_error *error __rte_unused)
7266 {
7267         struct mlx5_priv *priv = dev->data->dev_private;
7268         struct mlx5_dev_ctx_shared *sh = priv->sh;
7269         uint32_t handle_idx;
7270         int ret;
7271         struct mlx5_flow_handle *dh;
7272         struct rte_flow *flow;
7273
7274         if (!priv->config.dv_flow_en) {
7275                 if (fputs("device dv flow disabled\n", file) <= 0)
7276                         return -errno;
7277                 return -ENOTSUP;
7278         }
7279
7280         /* dump all */
7281         if (!flow_idx)
7282                 return mlx5_devx_cmd_flow_dump(sh->fdb_domain,
7283                                         sh->rx_domain,
7284                                         sh->tx_domain, file);
7285         /* dump one */
7286         flow = mlx5_ipool_get(priv->sh->ipool
7287                         [MLX5_IPOOL_RTE_FLOW], (uintptr_t)(void *)flow_idx);
7288         if (!flow)
7289                 return -ENOENT;
7290
7291         handle_idx = flow->dev_handles;
7292         while (handle_idx) {
7293                 dh = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW],
7294                                 handle_idx);
7295                 if (!dh)
7296                         return -ENOENT;
7297                 if (dh->drv_flow) {
7298                         ret = mlx5_devx_cmd_flow_single_dump(dh->drv_flow,
7299                                         file);
7300                         if (ret)
7301                                 return -ENOENT;
7302                 }
7303                 handle_idx = dh->next.next;
7304         }
7305         return 0;
7306 }
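
/*
 * Editor's sketch (application side): requesting a dump of all flows on
 * a port through rte_flow_dev_dump(), which reaches the function above
 * with a NULL flow handle. The helper name is illustrative.
 */
static inline int
example_dump_all_flows(uint16_t port_id, FILE *file)
{
        struct rte_flow_error error;

        /* A NULL flow handle requests a dump of all flows. */
        return rte_flow_dev_dump(port_id, NULL, file, &error);
}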
7307
7308 /**
7309  * Get aged-out flows.
7310  *
7311  * @param[in] dev
7312  *   Pointer to the Ethernet device structure.
7313  * @param[in] contexts
7314  *   The address of an array of pointers to the aged-out flow contexts.
7315  * @param[in] nb_contexts
7316  *   The length of the context array.
7317  * @param[out] error
7318  *   Perform verbose error reporting if not NULL. Initialized in case of
7319  *   error only.
7320  *
7321  * @return
7322  *   the number of contexts retrieved on success, a negative errno value
7323  *   otherwise. If nb_contexts is 0, return the total number of aged
7324  *   contexts. If nb_contexts is not 0, return the number of aged flows
7325  *   reported in the context array.
7326  */
7327 int
7328 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
7329                         uint32_t nb_contexts, struct rte_flow_error *error)
7330 {
7331         const struct mlx5_flow_driver_ops *fops;
7332         struct rte_flow_attr attr = { .transfer = 0 };
7333
7334         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7335                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7336                 return fops->get_aged_flows(dev, contexts, nb_contexts,
7337                                                     error);
7338         }
7339         DRV_LOG(ERR,
7340                 "port %u get aged flows is not supported.",
7341                  dev->data->port_id);
7342         return -ENOTSUP;
7343 }
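
/*
 * Editor's sketch (application side): the usual two-call pattern for the
 * aged-flows API above - first query the count with a zero-length array,
 * then fetch the contexts. The helper name is illustrative.
 */
static inline int
example_collect_aged(uint16_t port_id)
{
        struct rte_flow_error error;
        void **contexts;
        int n, ret;

        n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
        if (n <= 0)
                return n;
        contexts = rte_calloc(__func__, n, sizeof(*contexts), 0);
        if (!contexts)
                return -ENOMEM;
        ret = rte_flow_get_aged_flows(port_id, contexts, n, &error);
        /* ... destroy or refresh the flows behind each context ... */
        rte_free(contexts);
        return ret;
}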
7344
7345 /* Wrapper for driver action_validate op callback */
7346 static int
7347 flow_drv_action_validate(struct rte_eth_dev *dev,
7348                          const struct rte_flow_indir_action_conf *conf,
7349                          const struct rte_flow_action *action,
7350                          const struct mlx5_flow_driver_ops *fops,
7351                          struct rte_flow_error *error)
7352 {
7353         static const char err_msg[] = "indirect action validation unsupported";
7354
7355         if (!fops->action_validate) {
7356                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7357                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7358                                    NULL, err_msg);
7359                 return -rte_errno;
7360         }
7361         return fops->action_validate(dev, conf, action, error);
7362 }
7363
7364 /**
7365  * Destroys the indirect action by handle.
7366  *
7367  * @param dev
7368  *   Pointer to Ethernet device structure.
7369  * @param[in] handle
7370  *   Handle for the indirect action object to be destroyed.
7371  * @param[out] error
7372  *   Perform verbose error reporting if not NULL. PMDs initialize this
7373  *   structure in case of error only.
7374  *
7375  * @return
7376  *   0 on success, a negative errno value otherwise and rte_errno is set.
7377  *
7378  * @note: wrapper for driver action_destroy op callback.
7379  */
7380 static int
7381 mlx5_action_handle_destroy(struct rte_eth_dev *dev,
7382                            struct rte_flow_action_handle *handle,
7383                            struct rte_flow_error *error)
7384 {
7385         static const char err_msg[] = "indirect action destruction unsupported";
7386         struct rte_flow_attr attr = { .transfer = 0 };
7387         const struct mlx5_flow_driver_ops *fops =
7388                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7389
7390         if (!fops->action_destroy) {
7391                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7392                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7393                                    NULL, err_msg);
7394                 return -rte_errno;
7395         }
7396         return fops->action_destroy(dev, handle, error);
7397 }
7398
7399 /* Wrapper for driver action_update op callback */
7400 static int
7401 flow_drv_action_update(struct rte_eth_dev *dev,
7402                        struct rte_flow_action_handle *handle,
7403                        const void *update,
7404                        const struct mlx5_flow_driver_ops *fops,
7405                        struct rte_flow_error *error)
7406 {
7407         static const char err_msg[] = "indirect action update unsupported";
7408
7409         if (!fops->action_update) {
7410                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7411                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7412                                    NULL, err_msg);
7413                 return -rte_errno;
7414         }
7415         return fops->action_update(dev, handle, update, error);
7416 }
7417
7418 /* Wrapper for driver action_query op callback */
7419 static int
7420 flow_drv_action_query(struct rte_eth_dev *dev,
7421                       const struct rte_flow_action_handle *handle,
7422                       void *data,
7423                       const struct mlx5_flow_driver_ops *fops,
7424                       struct rte_flow_error *error)
7425 {
7426         static const char err_msg[] = "indirect action query unsupported";
7427
7428         if (!fops->action_query) {
7429                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7430                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7431                                    NULL, err_msg);
7432                 return -rte_errno;
7433         }
7434         return fops->action_query(dev, handle, data, error);
7435 }
7436
7437 /**
7438  * Create indirect action for reuse in multiple flow rules.
7439  *
7440  * @param dev
7441  *   Pointer to Ethernet device structure.
7442  * @param conf
7443  *   Pointer to indirect action object configuration.
7444  * @param[in] action
7445  *   Action configuration for indirect action object creation.
7446  * @param[out] error
7447  *   Perform verbose error reporting if not NULL. PMDs initialize this
7448  *   structure in case of error only.
7449  * @return
7450  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
7451  */
7452 static struct rte_flow_action_handle *
7453 mlx5_action_handle_create(struct rte_eth_dev *dev,
7454                           const struct rte_flow_indir_action_conf *conf,
7455                           const struct rte_flow_action *action,
7456                           struct rte_flow_error *error)
7457 {
7458         static const char err_msg[] = "indirect action creation unsupported";
7459         struct rte_flow_attr attr = { .transfer = 0 };
7460         const struct mlx5_flow_driver_ops *fops =
7461                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7462
7463         if (flow_drv_action_validate(dev, conf, action, fops, error))
7464                 return NULL;
7465         if (!fops->action_create) {
7466                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7467                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7468                                    NULL, err_msg);
7469                 return NULL;
7470         }
7471         return fops->action_create(dev, conf, action, error);
7472 }
7473
7474 /**
7475  * Updates in place the indirect action configuration pointed to by
7476  * *handle* with the configuration provided as the *update* argument.
7477  * The update of the indirect action configuration affects all flow rules
7478  * reusing the action via its handle.
7479  *
7480  * @param dev
7481  *   Pointer to Ethernet device structure.
7482  * @param[in] handle
7483  *   Handle for the indirect action to be updated.
7484  * @param[in] update
7485  *   Action specification used to modify the action pointed by handle.
7486  *   *update* could be of the same type as the action pointed to by the
7487  *   *handle* argument, or some other structure like a wrapper, depending
7488  *   on the indirect action type.
7489  * @param[out] error
7490  *   Perform verbose error reporting if not NULL. PMDs initialize this
7491  *   structure in case of error only.
7492  *
7493  * @return
7494  *   0 on success, a negative errno value otherwise and rte_errno is set.
7495  */
7496 static int
7497 mlx5_action_handle_update(struct rte_eth_dev *dev,
7498                 struct rte_flow_action_handle *handle,
7499                 const void *update,
7500                 struct rte_flow_error *error)
7501 {
7502         struct rte_flow_attr attr = { .transfer = 0 };
7503         const struct mlx5_flow_driver_ops *fops =
7504                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7505         int ret;
7506
7507         ret = flow_drv_action_validate(dev, NULL,
7508                         (const struct rte_flow_action *)update, fops, error);
7509         if (ret)
7510                 return ret;
7511         return flow_drv_action_update(dev, handle, update, fops,
7512                                       error);
7513 }
7514
7515 /**
7516  * Query the indirect action by handle.
7517  *
7518  * This function allows retrieving action-specific data such as counters.
7519  * Data is gathered by a special action which may be present in, or
7520  * referenced by, more than one flow rule definition.
7521  *
7522  * @see RTE_FLOW_ACTION_TYPE_COUNT
7523  *
7524  * @param dev
7525  *   Pointer to Ethernet device structure.
7526  * @param[in] handle
7527  *   Handle for the indirect action to query.
7528  * @param[in, out] data
7529  *   Pointer to storage for the associated query data type.
7530  * @param[out] error
7531  *   Perform verbose error reporting if not NULL. PMDs initialize this
7532  *   structure in case of error only.
7533  *
7534  * @return
7535  *   0 on success, a negative errno value otherwise and rte_errno is set.
7536  */
7537 static int
7538 mlx5_action_handle_query(struct rte_eth_dev *dev,
7539                          const struct rte_flow_action_handle *handle,
7540                          void *data,
7541                          struct rte_flow_error *error)
7542 {
7543         struct rte_flow_attr attr = { .transfer = 0 };
7544         const struct mlx5_flow_driver_ops *fops =
7545                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7546
7547         return flow_drv_action_query(dev, handle, data, fops, error);
7548 }
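
/*
 * Editor's sketch (application side): the indirect action life cycle
 * served by the wrappers above - create a COUNT action handle, query it,
 * destroy it. The helper name is illustrative.
 */
static inline int
example_indirect_count(uint16_t port_id)
{
        const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_query_count stats = { .reset = 0 };
        struct rte_flow_action_handle *handle;
        struct rte_flow_error error;
        int ret;

        handle = rte_flow_action_handle_create(port_id, &conf, &action,
                                               &error);
        if (!handle)
                return -rte_errno;
        ret = rte_flow_action_handle_query(port_id, handle, &stats, &error);
        rte_flow_action_handle_destroy(port_id, handle, &error);
        return ret;
}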
7549
7550 /**
7551  * Destroy all indirect actions (shared RSS).
7552  *
7553  * @param dev
7554  *   Pointer to Ethernet device.
7555  *
7556  * @return
7557  *   0 on success, a negative errno value otherwise and rte_errno is set.
7558  */
7559 int
7560 mlx5_action_handle_flush(struct rte_eth_dev *dev)
7561 {
7562         struct rte_flow_error error;
7563         struct mlx5_priv *priv = dev->data->dev_private;
7564         struct mlx5_shared_action_rss *shared_rss;
7565         int ret = 0;
7566         uint32_t idx;
7567
7568         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
7569                       priv->rss_shared_actions, idx, shared_rss, next) {
7570                 ret |= mlx5_action_handle_destroy(dev,
7571                        (struct rte_flow_action_handle *)(uintptr_t)idx, &error);
7572         }
7573         return ret;
7574 }
7575
7576 #ifndef HAVE_MLX5DV_DR
7577 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
7578 #else
7579 #define MLX5_DOMAIN_SYNC_FLOW \
7580         (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
7581 #endif
7582
7583 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
7584 {
7585         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
7586         const struct mlx5_flow_driver_ops *fops;
7587         int ret;
7588         struct rte_flow_attr attr = { .transfer = 0 };
7589
7590         fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7591         ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
7592         if (ret > 0)
7593                 ret = -ret;
7594         return ret;
7595 }
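
/*
 * Editor's sketch: syncing one steering domain via the PMD-private API
 * above. The MLX5_DOMAIN_BIT_* flags are assumed to come from
 * rte_pmd_mlx5.h; the helper name is illustrative.
 */
static inline int
example_sync_rx_domain(uint16_t port_id)
{
        return rte_pmd_mlx5_sync_flow(port_id, MLX5_DOMAIN_BIT_NIC_RX);
}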
7596
7597 /**
7598  * Tunnel offload functionality is defined for the DV environment only.
7599  */
7600 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7601 __extension__
7602 union tunnel_offload_mark {
7603         uint32_t val;
7604         struct {
7605                 uint32_t app_reserve:8;
7606                 uint32_t table_id:15;
7607                 uint32_t transfer:1;
7608                 uint32_t _unused_:8;
7609         };
7610 };
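
/*
 * Editor's sketch: packing the driver-owned fields of a tunnel-offload
 * MARK value with the union above; the app_reserve field is left for
 * application use. C11 anonymous-struct designators are assumed.
 */
static inline uint32_t
example_encode_tunnel_mark(uint32_t table_id, bool transfer)
{
        union tunnel_offload_mark mark = {
                .app_reserve = 0,
                .table_id = table_id,
                .transfer = transfer,
                ._unused_ = 0,
        };

        return mark.val;
}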
7611
7612 static bool
7613 mlx5_access_tunnel_offload_db
7614         (struct rte_eth_dev *dev,
7615          bool (*match)(struct rte_eth_dev *,
7616                        struct mlx5_flow_tunnel *, const void *),
7617          void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
7618          void (*miss)(struct rte_eth_dev *, void *),
7619          void *ctx, bool lock_op);
7620
7621 static int
7622 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
7623                              struct rte_flow *flow,
7624                              const struct rte_flow_attr *attr,
7625                              const struct rte_flow_action *app_actions,
7626                              uint32_t flow_idx,
7627                              struct tunnel_default_miss_ctx *ctx,
7628                              struct rte_flow_error *error)
7629 {
7630         struct mlx5_priv *priv = dev->data->dev_private;
7631         struct mlx5_flow *dev_flow;
7632         struct rte_flow_attr miss_attr = *attr;
7633         const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
7634         const struct rte_flow_item miss_items[2] = {
7635                 {
7636                         .type = RTE_FLOW_ITEM_TYPE_ETH,
7637                         .spec = NULL,
7638                         .last = NULL,
7639                         .mask = NULL
7640                 },
7641                 {
7642                         .type = RTE_FLOW_ITEM_TYPE_END,
7643                         .spec = NULL,
7644                         .last = NULL,
7645                         .mask = NULL
7646                 }
7647         };
7648         union tunnel_offload_mark mark_id;
7649         struct rte_flow_action_mark miss_mark;
7650         struct rte_flow_action miss_actions[3] = {
7651                 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
7652                 [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
7653         };
7654         const struct rte_flow_action_jump *jump_data;
7655         uint32_t i, flow_table = 0; /* prevent compilation warning */
7656         struct flow_grp_info grp_info = {
7657                 .external = 1,
7658                 .transfer = attr->transfer,
7659                 .fdb_def_rule = !!priv->fdb_def_rule,
7660                 .std_tbl_fix = 0,
7661         };
7662         int ret;
7663
7664         if (!attr->transfer) {
7665                 uint32_t q_size;
7666
7667                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
7668                 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
7669                 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
7670                                          0, SOCKET_ID_ANY);
7671                 if (!ctx->queue)
7672                         return rte_flow_error_set
7673                                 (error, ENOMEM,
7674                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7675                                 NULL, "invalid default miss RSS");
7676                 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
7677                 ctx->action_rss.level = 0;
7678                 ctx->action_rss.types = priv->rss_conf.rss_hf;
7679                 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
7680                 ctx->action_rss.queue_num = priv->reta_idx_n;
7681                 ctx->action_rss.key = priv->rss_conf.rss_key;
7682                 ctx->action_rss.queue = ctx->queue;
7683                 if (!priv->reta_idx_n || !priv->rxqs_n)
7684                         return rte_flow_error_set
7685                                 (error, EINVAL,
7686                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7687                                 NULL, "invalid port configuration");
7688                 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
7689                         ctx->action_rss.types = 0;
7690                 for (i = 0; i != priv->reta_idx_n; ++i)
7691                         ctx->queue[i] = (*priv->reta_idx)[i];
7692         } else {
7693                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
7694                 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
7695         }
        miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
        for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
        jump_data = app_actions->conf;
        miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
        miss_attr.group = jump_data->group;
        ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
                                       &flow_table, &grp_info, error);
        if (ret)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "invalid tunnel id");
        mark_id.app_reserve = 0;
        mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
        mark_id.transfer = !!attr->transfer;
        mark_id._unused_ = 0;
        miss_mark.id = mark_id.val;
        dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
                                    miss_items, miss_actions, flow_idx, error);
        if (!dev_flow)
                return -rte_errno;
        dev_flow->flow = flow;
        dev_flow->external = true;
        dev_flow->tunnel = tunnel;
        /* Subflow object was created, we must include it in the list. */
        SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
                      dev_flow->handle, next);
        DRV_LOG(DEBUG,
                "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
                dev->data->port_id, tunnel->app_tunnel.type,
                tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
        ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
                                 miss_actions, error);
        if (!ret)
                ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
                                                  error);

        return ret;
}

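/**
 * Translate a tunnel offload mark back to the flow table entry it encodes.
 * Returns NULL when the mark does not reference a known table.
 */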
static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_ctx_shared *sh = priv->sh;
        struct mlx5_hlist_entry *he;
        union tunnel_offload_mark mbits = { .val = mark };
        union mlx5_flow_tbl_key table_key = {
                {
                        .level = tunnel_id_to_flow_tbl(mbits.table_id),
                        .id = 0,
                        .reserved = 0,
                        .dummy = 0,
                        .is_fdb = !!mbits.transfer,
                        .is_egress = 0,
                }
        };
        he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
        return he ?
               container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
}

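/*
 * Hash-list callbacks that map a {tunnel id, group} key to an allocated
 * flow table id: create takes a table id from the dedicated ipool, match
 * compares the packed key and remove returns the id to the pool.
 */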
static void
mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
                                   struct mlx5_hlist_entry *entry)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

        mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
                        tunnel_flow_tbl_to_id(tte->flow_table));
        mlx5_free(tte);
}

static int
mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
                                  struct mlx5_hlist_entry *entry,
                                  uint64_t key, void *cb_ctx __rte_unused)
{
        union tunnel_tbl_key tbl = {
                .val = key,
        };
        struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);

        return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
}

static struct mlx5_hlist_entry *
mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
                                   void *ctx __rte_unused)
{
        struct mlx5_dev_ctx_shared *sh = list->ctx;
        struct tunnel_tbl_entry *tte;
        union tunnel_tbl_key tbl = {
                .val = key,
        };

        tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
                          sizeof(*tte), 0,
                          SOCKET_ID_ANY);
        if (!tte)
                goto err;
        mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
                          &tte->flow_table);
        if (tte->flow_table >= MLX5_MAX_TABLES) {
                DRV_LOG(ERR, "Tunnel TBL ID %u exceeds the maximum limit.",
                        tte->flow_table);
                mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
                                tte->flow_table);
                goto err;
        } else if (!tte->flow_table) {
                goto err;
        }
        tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
        tte->tunnel_id = tbl.tunnel_id;
        tte->group = tbl.group;
        return &tte->hash;
err:
        if (tte)
                mlx5_free(tte);
        return NULL;
}

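/**
 * Resolve the flow table used by @p group in the scope of @p tunnel
 * (or in the global tunnel hub scope when @p tunnel is NULL), creating
 * the mapping on first use.
 */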
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
                                const struct mlx5_flow_tunnel *tunnel,
                                uint32_t group, uint32_t *table,
                                struct rte_flow_error *error)
{
        struct mlx5_hlist_entry *he;
        struct tunnel_tbl_entry *tte;
        union tunnel_tbl_key key = {
                .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
                .group = group
        };
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct mlx5_hlist *group_hash;

        group_hash = tunnel ? tunnel->groups : thub->groups;
        he = mlx5_hlist_register(group_hash, key.val, NULL);
        if (!he)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "tunnel group index not supported");
        tte = container_of(he, typeof(*tte), hash);
        *table = tte->flow_table;
        DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
                dev->data->port_id, key.tunnel_id, group, *table);
        return 0;
}

static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
                      struct mlx5_flow_tunnel *tunnel)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_indexed_pool *ipool;

        DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
                dev->data->port_id, tunnel->tunnel_id);
        LIST_REMOVE(tunnel, chain);
        mlx5_hlist_destroy(tunnel->groups);
        ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
        mlx5_ipool_free(ipool, tunnel->tunnel_id);
}

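/**
 * Walk the tunnel hub under its spinlock and apply @p match to each
 * tunnel; the first tunnel that matches is passed to @p hit, otherwise
 * @p miss runs once. With @p lock_op true both callbacks execute while
 * the spinlock is still held; with @p lock_op false the lock is dropped
 * first, so callbacks that release the matched tunnel do not deadlock.
 *
 * A minimal usage sketch, mirroring mlx5_find_tunnel_id() below:
 *
 *   struct tunnel_db_find_tunnel_id_ctx ctx = { .tunnel_id = id };
 *
 *   mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
 *                                 find_tunnel_id_hit, NULL, &ctx, true);
 */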
static bool
mlx5_access_tunnel_offload_db
        (struct rte_eth_dev *dev,
         bool (*match)(struct rte_eth_dev *,
                       struct mlx5_flow_tunnel *, const void *),
         void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
         void (*miss)(struct rte_eth_dev *, void *),
         void *ctx, bool lock_op)
{
        bool verdict = false;
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct mlx5_flow_tunnel *tunnel;

        rte_spinlock_lock(&thub->sl);
        LIST_FOREACH(tunnel, &thub->tunnels, chain) {
                verdict = match(dev, tunnel, (const void *)ctx);
                if (verdict)
                        break;
        }
        if (!lock_op)
                rte_spinlock_unlock(&thub->sl);
        if (verdict && hit)
                hit(dev, tunnel, ctx);
        if (!verdict && miss)
                miss(dev, ctx);
        if (lock_op)
                rte_spinlock_unlock(&thub->sl);

        return verdict;
}

struct tunnel_db_find_tunnel_id_ctx {
        uint32_t tunnel_id;
        struct mlx5_flow_tunnel *tunnel;
};

static bool
find_tunnel_id_match(struct rte_eth_dev *dev,
                     struct mlx5_flow_tunnel *tunnel, const void *x)
{
        const struct tunnel_db_find_tunnel_id_ctx *ctx = x;

        RTE_SET_USED(dev);
        return tunnel->tunnel_id == ctx->tunnel_id;
}

static void
find_tunnel_id_hit(struct rte_eth_dev *dev,
                   struct mlx5_flow_tunnel *tunnel, void *x)
{
        struct tunnel_db_find_tunnel_id_ctx *ctx = x;

        RTE_SET_USED(dev);
        ctx->tunnel = tunnel;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
{
        struct tunnel_db_find_tunnel_id_ctx ctx = {
                .tunnel_id = id,
        };

        mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
                                      find_tunnel_id_hit, NULL, &ctx, true);

        return ctx.tunnel;
}

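/**
 * Allocate a PMD tunnel object for @p app_tunnel and pre-build the
 * internal action and item that represent it in tunnel offload rules.
 */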
static struct mlx5_flow_tunnel *
mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
                          const struct rte_flow_tunnel *app_tunnel)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_indexed_pool *ipool;
        struct mlx5_flow_tunnel *tunnel;
        uint32_t id;

        ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
        tunnel = mlx5_ipool_zmalloc(ipool, &id);
        if (!tunnel)
                return NULL;
        if (id >= MLX5_MAX_TUNNELS) {
                mlx5_ipool_free(ipool, id);
                DRV_LOG(ERR, "Tunnel ID %u exceeds the maximum limit.", id);
                return NULL;
        }
        tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
                                           mlx5_flow_tunnel_grp2tbl_create_cb,
                                           mlx5_flow_tunnel_grp2tbl_match_cb,
                                           mlx5_flow_tunnel_grp2tbl_remove_cb);
        if (!tunnel->groups) {
                mlx5_ipool_free(ipool, id);
                return NULL;
        }
        tunnel->groups->ctx = priv->sh;
        /* Initialize the new PMD tunnel. */
        memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
        tunnel->tunnel_id = id;
        tunnel->action.type = (typeof(tunnel->action.type))
                              MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
        tunnel->action.conf = tunnel;
        tunnel->item.type = (typeof(tunnel->item.type))
                            MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
        tunnel->item.spec = tunnel;
        tunnel->item.last = NULL;
        tunnel->item.mask = NULL;

        DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
                dev->data->port_id, tunnel->tunnel_id);

        return tunnel;
}

struct tunnel_db_get_tunnel_ctx {
        const struct rte_flow_tunnel *app_tunnel;
        struct mlx5_flow_tunnel *tunnel;
};

static bool get_tunnel_match(struct rte_eth_dev *dev,
                             struct mlx5_flow_tunnel *tunnel, const void *x)
{
        const struct tunnel_db_get_tunnel_ctx *ctx = x;

        RTE_SET_USED(dev);
        return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
                       sizeof(*ctx->app_tunnel));
}

static void get_tunnel_hit(struct rte_eth_dev *dev,
                           struct mlx5_flow_tunnel *tunnel, void *x)
{
        /* called under tunnel spinlock protection */
        struct tunnel_db_get_tunnel_ctx *ctx = x;

        RTE_SET_USED(dev);
        /* Match the atomic decrement in the release path. */
        __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
        ctx->tunnel = tunnel;
}

static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
{
        /* called under tunnel spinlock protection */
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct tunnel_db_get_tunnel_ctx *ctx = x;

        rte_spinlock_unlock(&thub->sl);
        ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
        rte_spinlock_lock(&thub->sl);
        if (ctx->tunnel) {
                ctx->tunnel->refctn = 1;
                LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
        }
}

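/**
 * Find the PMD tunnel that matches @p app_tunnel, allocating it on the
 * first reference. The returned tunnel holds an extra reference that is
 * typically dropped through the item/action release callbacks below.
 */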
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
                     const struct rte_flow_tunnel *app_tunnel,
                     struct mlx5_flow_tunnel **tunnel)
{
        struct tunnel_db_get_tunnel_ctx ctx = {
                .app_tunnel = app_tunnel,
        };

        mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
                                      get_tunnel_miss, &ctx, true);
        *tunnel = ctx.tunnel;
        return ctx.tunnel ? 0 : -ENOMEM;
}

void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
{
        struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;

        if (!thub)
                return;
        if (!LIST_EMPTY(&thub->tunnels))
                DRV_LOG(WARNING, "port %u tunnels present", port_id);
        mlx5_hlist_destroy(thub->groups);
        mlx5_free(thub);
}

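/**
 * Allocate the per-device tunnel hub: the tunnel list, its spinlock and
 * the global group-to-table hash list.
 */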
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
{
        int err;
        struct mlx5_flow_tunnel_hub *thub;

        thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
                           0, SOCKET_ID_ANY);
        if (!thub)
                return -ENOMEM;
        LIST_INIT(&thub->tunnels);
        rte_spinlock_init(&thub->sl);
        thub->groups = mlx5_hlist_create("flow groups",
                                         rte_align32pow2(MLX5_MAX_TABLES), 0,
                                         0, mlx5_flow_tunnel_grp2tbl_create_cb,
                                         mlx5_flow_tunnel_grp2tbl_match_cb,
                                         mlx5_flow_tunnel_grp2tbl_remove_cb);
        if (!thub->groups) {
                err = -rte_errno;
                goto err;
        }
        thub->groups->ctx = sh;
        sh->tunnel_hub = thub;

        return 0;

err:
        /* Only the hash list creation can fail here; thub is valid. */
        mlx5_free(thub);
        return err;
}

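/**
 * Sanity-check an application tunnel before building offload objects.
 * On failure, *err_msg is set to a static description of the problem;
 * err_msg is passed by address so the message reaches the caller.
 */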
static inline bool
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                          struct rte_flow_tunnel *tunnel,
                          const char **err_msg)
{
        *err_msg = NULL;
        if (!is_tunnel_offload_active(dev)) {
                *err_msg = "tunnel offload was not activated";
                goto out;
        } else if (!tunnel) {
                *err_msg = "no application tunnel";
                goto out;
        }

        switch (tunnel->type) {
        default:
                *err_msg = "unsupported tunnel type";
                goto out;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                break;
        }

out:
        return !*err_msg;
}

static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                           struct rte_flow_tunnel *app_tunnel,
                           struct rte_flow_action **actions,
                           uint32_t *num_of_actions,
                           struct rte_flow_error *error)
{
        int ret;
        struct mlx5_flow_tunnel *tunnel;
        const char *err_msg = NULL;
        bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

        if (!verdict)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          err_msg);
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "failed to initialize pmd tunnel");
        }
        *actions = &tunnel->action;
        *num_of_actions = 1;
        return 0;
}

static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       struct rte_flow_tunnel *app_tunnel,
                       struct rte_flow_item **items,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error)
{
        int ret;
        struct mlx5_flow_tunnel *tunnel;
        const char *err_msg = NULL;
        bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

        if (!verdict)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          err_msg);
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "failed to initialize pmd tunnel");
        }
        *items = &tunnel->item;
        *num_of_items = 1;
        return 0;
}

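/*
 * Release of PMD tunnel items/actions: the context identifies the tunnel
 * by the address of its embedded item or action, and dropping the last
 * reference frees the tunnel outside the hub spinlock.
 */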
struct tunnel_db_element_release_ctx {
        struct rte_flow_item *items;
        struct rte_flow_action *actions;
        uint32_t num_elements;
        struct rte_flow_error *error;
        int ret;
};

static bool
tunnel_element_release_match(struct rte_eth_dev *dev,
                             struct mlx5_flow_tunnel *tunnel, const void *x)
{
        const struct tunnel_db_element_release_ctx *ctx = x;

        RTE_SET_USED(dev);
        if (ctx->num_elements != 1)
                return false;
        else if (ctx->items)
                return ctx->items == &tunnel->item;
        else if (ctx->actions)
                return ctx->actions == &tunnel->action;

        return false;
}

static void
tunnel_element_release_hit(struct rte_eth_dev *dev,
                           struct mlx5_flow_tunnel *tunnel, void *x)
{
        struct tunnel_db_element_release_ctx *ctx = x;

        ctx->ret = 0;
        if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
                mlx5_flow_tunnel_free(dev, tunnel);
}

static void
tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
{
        struct tunnel_db_element_release_ctx *ctx = x;

        RTE_SET_USED(dev);
        ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
                                      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                      "invalid argument");
}

static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
                              struct rte_flow_item *pmd_items,
                              uint32_t num_items, struct rte_flow_error *err)
{
        struct tunnel_db_element_release_ctx ctx = {
                .items = pmd_items,
                .actions = NULL,
                .num_elements = num_items,
                .error = err,
        };

        mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
                                      tunnel_element_release_hit,
                                      tunnel_element_release_miss, &ctx, false);

        return ctx.ret;
}

static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
                                struct rte_flow_action *pmd_actions,
                                uint32_t num_actions,
                                struct rte_flow_error *err)
{
        struct tunnel_db_element_release_ctx ctx = {
                .items = NULL,
                .actions = pmd_actions,
                .num_elements = num_actions,
                .error = err,
        };

        mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
                                      tunnel_element_release_hit,
                                      tunnel_element_release_miss, &ctx, false);

        return ctx.ret;
}

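/**
 * Restore-info callback: decode the tunnel offload mark delivered in the
 * mbuf FDIR id and report the originating tunnel and group back to the
 * application, e.g. when it calls rte_flow_get_restore_info() on a miss
 * packet.
 */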
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err)
{
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
        const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

        if (!is_tunnel_offload_active(dev)) {
                info->flags = 0;
                return 0;
        }

        if ((ol_flags & mask) != mask)
                goto err;
        tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
        if (!tble) {
                DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
                        dev->data->port_id, m->hash.fdir.hi);
                goto err;
        }
        MLX5_ASSERT(tble->tunnel);
        memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
        info->group_id = tble->group_id;
        info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
                      RTE_FLOW_RESTORE_INFO_GROUP_ID |
                      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

        return 0;

err:
        return rte_flow_error_set(err, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                  "failed to get restore info");
}

#else /* HAVE_IBV_FLOW_DV_SUPPORT */
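/* Tunnel offload requires DV support; without it every callback below is
 * a stub that reports -ENOTSUP.
 */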
static int
mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
                           __rte_unused struct rte_flow_tunnel *app_tunnel,
                           __rte_unused struct rte_flow_action **actions,
                           __rte_unused uint32_t *num_of_actions,
                           __rte_unused struct rte_flow_error *error)
{
        return -ENOTSUP;
}

static int
mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
                       __rte_unused struct rte_flow_tunnel *app_tunnel,
                       __rte_unused struct rte_flow_item **items,
                       __rte_unused uint32_t *num_of_items,
                       __rte_unused struct rte_flow_error *error)
{
        return -ENOTSUP;
}

static int
mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
                              __rte_unused struct rte_flow_item *pmd_items,
                              __rte_unused uint32_t num_items,
                              __rte_unused struct rte_flow_error *err)
{
        return -ENOTSUP;
}

static int
mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
                                __rte_unused struct rte_flow_action *pmd_action,
                                __rte_unused uint32_t num_actions,
                                __rte_unused struct rte_flow_error *err)
{
        return -ENOTSUP;
}

static int
mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
                                  __rte_unused struct rte_mbuf *m,
                                  __rte_unused struct rte_flow_restore_info *i,
                                  __rte_unused struct rte_flow_error *err)
{
        return -ENOTSUP;
}

static int
flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
                             __rte_unused struct rte_flow *flow,
                             __rte_unused const struct rte_flow_attr *attr,
                             __rte_unused const struct rte_flow_action *actions,
                             __rte_unused uint32_t flow_idx,
                             __rte_unused struct tunnel_default_miss_ctx *ctx,
                             __rte_unused struct rte_flow_error *error)
{
        return -ENOTSUP;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
                    __rte_unused uint32_t id)
{
        return NULL;
}

static void
mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
                      __rte_unused struct mlx5_flow_tunnel *tunnel)
{
}

static uint32_t
tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
                                __rte_unused const struct mlx5_flow_tunnel *t,
                                __rte_unused uint32_t group,
                                __rte_unused uint32_t *table,
                                struct rte_flow_error *error)
{
        return rte_flow_error_set(error, ENOTSUP,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                  "tunnel offload requires DV support");
}

void
mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
                        __rte_unused uint16_t port_id)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */