net/mlx5: support E-Switch mirroring and jump in one flow
drivers/net/mlx5/mlx5_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

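/*
 * Context used to build the default-miss fate for tunnel offload flows:
 * the union holds whichever action (RSS, queue or jump) is created by
 * flow_tunnel_add_default_miss().
 */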
struct tunnel_default_miss_ctx {
	uint16_t *queue;
	__extension__
	union {
		struct rte_flow_action_rss action_rss;
		struct rte_flow_action_queue miss_queue;
		struct rte_flow_action_jump miss_jump;
		uint8_t raw[0];
	};
};

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
			     struct rte_flow *flow,
			     const struct rte_flow_attr *attr,
			     const struct rte_flow_action *app_actions,
			     uint32_t flow_idx,
			     struct tunnel_default_miss_ctx *ctx,
			     struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
				const struct mlx5_flow_tunnel *tunnel,
				uint32_t group, uint32_t *table,
				struct rte_flow_error *error);

static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
static void mlx5_flow_pop_thread_workspace(void);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
	[MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
	[MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
	[MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
	[MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
	(const int []){ \
		__VA_ARGS__, 0, \
	}
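
/*
 * For illustration, MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
 * MLX5_EXPANSION_IPV6) expands to the anonymous array
 * (const int []){ MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0, },
 * i.e. a list of next-node indexes terminated by 0.
 */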

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
	const int *const next;
	/**<
	 * List of next node indexes. Index 0 is interpreted as a terminator.
	 */
	const enum rte_flow_item_type type;
	/**< Pattern item type of current node. */
	uint64_t rss_types;
	/**<
	 * RSS types bit-field associated with this node
	 * (see ETH_RSS_* definitions).
	 */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
	uint32_t entries;
	/**< Number of entries in @p entry[]. */
	struct {
		struct rte_flow_item *pattern; /**< Expanded pattern array. */
		uint32_t priority; /**< Priority offset for each expansion. */
	} entry[];
};

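/**
 * Deduce the next pattern item type from the spec and mask of @p item
 * (EtherType for ETH/VLAN items, next protocol for IPv4/IPv6 items).
 *
 * @return
 *   The inferred item type, RTE_FLOW_ITEM_TYPE_END when the fully masked
 *   spec shows that no expansion is required, or RTE_FLOW_ITEM_TYPE_VOID
 *   when nothing can be deduced.
 */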
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
	enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
	uint16_t ether_type = 0;
	uint16_t ether_type_m;
	uint8_t ip_next_proto = 0;
	uint8_t ip_next_proto_m;

	if (item == NULL || item->spec == NULL)
		return ret;
	switch (item->type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_eth *)
						(item->mask))->type;
		else
			ether_type_m = rte_flow_item_eth_mask.type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_eth *)
				(item->spec))->type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		if (item->mask)
			ether_type_m = ((const struct rte_flow_item_vlan *)
						(item->mask))->inner_type;
		else
			ether_type_m = rte_flow_item_vlan_mask.inner_type;
		if (ether_type_m != RTE_BE16(0xFFFF))
			break;
		ether_type = ((const struct rte_flow_item_vlan *)
				(item->spec))->inner_type;
		if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
			ret = RTE_FLOW_ITEM_TYPE_VLAN;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
					(item->mask))->hdr.next_proto_id;
		else
			ip_next_proto_m =
				rte_flow_item_ipv4_mask.hdr.next_proto_id;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv4 *)
				(item->spec))->hdr.next_proto_id;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		if (item->mask)
			ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
						(item->mask))->hdr.proto;
		else
			ip_next_proto_m =
				rte_flow_item_ipv6_mask.hdr.proto;
		if (ip_next_proto_m != 0xFF)
			break;
		ip_next_proto = ((const struct rte_flow_item_ipv6 *)
				(item->spec))->hdr.proto;
		if (ip_next_proto == IPPROTO_UDP)
			ret = RTE_FLOW_ITEM_TYPE_UDP;
		else if (ip_next_proto == IPPROTO_TCP)
			ret = RTE_FLOW_ITEM_TYPE_TCP;
		else if (ip_next_proto == IPPROTO_IP)
			ret = RTE_FLOW_ITEM_TYPE_IPV4;
		else if (ip_next_proto == IPPROTO_IPV6)
			ret = RTE_FLOW_ITEM_TYPE_IPV6;
		else
			ret = RTE_FLOW_ITEM_TYPE_END;
		break;
	default:
		ret = RTE_FLOW_ITEM_TYPE_VOID;
		break;
	}
	return ret;
}

#define MLX5_RSS_EXP_ELT_N 8

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set; the following errors are defined:
 *
 *   -E2BIG: graph @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
		     const struct rte_flow_item *pattern, uint64_t types,
		     const struct mlx5_flow_expand_node graph[],
		     int graph_root_index)
{
	const struct rte_flow_item *item;
	const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
	const int *next_node;
	const int *stack[MLX5_RSS_EXP_ELT_N];
	int stack_pos = 0;
	struct rte_flow_item flow_items[MLX5_RSS_EXP_ELT_N];
	unsigned int i;
	size_t lsize;
	size_t user_pattern_size = 0;
	void *addr = NULL;
	const struct mlx5_flow_expand_node *next = NULL;
	struct rte_flow_item missed_item;
	int missed = 0;
	int elt = 0;
	const struct rte_flow_item *last_item = NULL;

	memset(&missed_item, 0, sizeof(missed_item));
	lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
		MLX5_RSS_EXP_ELT_N * sizeof(buf->entry[0]);
	if (lsize <= size) {
		buf->entry[0].priority = 0;
		buf->entry[0].pattern = (void *)&buf->entry[MLX5_RSS_EXP_ELT_N];
		buf->entries = 0;
		addr = buf->entry[0].pattern;
	}
	for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
			last_item = item;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == item->type)
				break;
		}
		if (next)
			node = next;
		user_pattern_size += sizeof(*item);
	}
	user_pattern_size += sizeof(*item); /* Handle END item. */
	lsize += user_pattern_size;
	/* Copy the user pattern in the first entry of the buffer. */
	if (lsize <= size) {
		rte_memcpy(addr, pattern, user_pattern_size);
		addr = (void *)(((uintptr_t)addr) + user_pattern_size);
		buf->entries = 1;
	}
	/* Start expanding. */
	memset(flow_items, 0, sizeof(flow_items));
	user_pattern_size -= sizeof(*item);
	/*
	 * If the last valid item has spec set, deduce the missing item
	 * needed to complete the pattern so it can be used for expansion.
	 */
	missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
	if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
		/* Item type END indicates expansion is not required. */
		return lsize;
	}
	if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
		next = NULL;
		missed = 1;
		for (i = 0; node->next && node->next[i]; ++i) {
			next = &graph[node->next[i]];
			if (next->type == missed_item.type) {
				flow_items[0].type = missed_item.type;
				flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
				break;
			}
			next = NULL;
		}
	}
	if (next && missed) {
		elt = 2; /* missed item + item end. */
		node = next;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if ((node->rss_types & types) && lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
			addr = (void *)(((uintptr_t)addr) +
					elt * sizeof(*item));
		}
	}
	memset(flow_items, 0, sizeof(flow_items));
	next_node = node->next;
	stack[stack_pos] = next_node;
	node = next_node ? &graph[*next_node] : NULL;
	while (node) {
		flow_items[stack_pos].type = node->type;
		if (node->rss_types & types) {
			/*
			 * Compute the number of items to copy from the
			 * expansion and copy them.
			 * When stack_pos is 0, there is one element in the
			 * stack, plus the additional END item.
			 */
			elt = stack_pos + 2;
			flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
			lsize += elt * sizeof(*item) + user_pattern_size;
			if (lsize <= size) {
				size_t n = elt * sizeof(*item);

				buf->entry[buf->entries].priority =
					stack_pos + 1 + missed;
				buf->entry[buf->entries].pattern = addr;
				buf->entries++;
				rte_memcpy(addr, buf->entry[0].pattern,
					   user_pattern_size);
				addr = (void *)(((uintptr_t)addr) +
						user_pattern_size);
				rte_memcpy(addr, &missed_item,
					   missed * sizeof(*item));
				addr = (void *)(((uintptr_t)addr) +
					missed * sizeof(*item));
				rte_memcpy(addr, flow_items, n);
				addr = (void *)(((uintptr_t)addr) + n);
			}
		}
		/* Go deeper. */
		if (node->next) {
			next_node = node->next;
			if (stack_pos++ == MLX5_RSS_EXP_ELT_N - 2) {
				/*
				 * Bound the depth so that neither stack[]
				 * nor the flow_items[stack_pos + 1]
				 * terminator can be written out of bounds.
				 */
				rte_errno = E2BIG;
				return -rte_errno;
			}
			stack[stack_pos] = next_node;
		} else if (*(next_node + 1)) {
			/* Follow up with the next possibility. */
			++next_node;
		} else {
			/* Move to the next path. */
			if (stack_pos)
				next_node = stack[--stack_pos];
			next_node++;
			stack[stack_pos] = next_node;
		}
		node = *next_node ? &graph[*next_node] : NULL;
	}
	/* No expanded flows, but we have a missed item; create one rule for it. */
	if (buf->entries == 1 && missed != 0) {
		elt = 2;
		lsize += elt * sizeof(*item) + user_pattern_size;
		if (lsize <= size) {
			buf->entry[buf->entries].priority = 1;
			buf->entry[buf->entries].pattern = addr;
			buf->entries++;
			flow_items[0].type = missed_item.type;
			flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
			rte_memcpy(addr, buf->entry[0].pattern,
				   user_pattern_size);
			addr = (void *)(((uintptr_t)addr) + user_pattern_size);
			rte_memcpy(addr, flow_items, elt * sizeof(*item));
		}
	}
	return lsize;
}
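
/*
 * Worked example (a sketch, not an exhaustive trace): expanding the user
 * pattern ETH / IPV4 / END with types = ETH_RSS_NONFRAG_IPV4_UDP over
 * mlx5_support_expansion below yields two entries:
 *   - ETH / IPV4 / END (the original pattern, priority 0);
 *   - ETH / IPV4 / UDP / END (priority 1), because the
 *     MLX5_EXPANSION_IPV4_UDP node matches the requested RSS type.
 */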

enum mlx5_expansion {
	MLX5_EXPANSION_ROOT,
	MLX5_EXPANSION_ROOT_OUTER,
	MLX5_EXPANSION_ROOT_ETH_VLAN,
	MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_ETH,
	MLX5_EXPANSION_OUTER_ETH_VLAN,
	MLX5_EXPANSION_OUTER_VLAN,
	MLX5_EXPANSION_OUTER_IPV4,
	MLX5_EXPANSION_OUTER_IPV4_UDP,
	MLX5_EXPANSION_OUTER_IPV4_TCP,
	MLX5_EXPANSION_OUTER_IPV6,
	MLX5_EXPANSION_OUTER_IPV6_UDP,
	MLX5_EXPANSION_OUTER_IPV6_TCP,
	MLX5_EXPANSION_VXLAN,
	MLX5_EXPANSION_VXLAN_GPE,
	MLX5_EXPANSION_GRE,
	MLX5_EXPANSION_MPLS,
	MLX5_EXPANSION_ETH,
	MLX5_EXPANSION_ETH_VLAN,
	MLX5_EXPANSION_VLAN,
	MLX5_EXPANSION_IPV4,
	MLX5_EXPANSION_IPV4_UDP,
	MLX5_EXPANSION_IPV4_TCP,
	MLX5_EXPANSION_IPV6,
	MLX5_EXPANSION_IPV6_UDP,
	MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
	[MLX5_EXPANSION_ROOT] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
						  MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
						(MLX5_EXPANSION_OUTER_ETH_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_END,
	},
	[MLX5_EXPANSION_OUTER_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6,
						  MLX5_EXPANSION_MPLS),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.rss_types = 0,
	},
	[MLX5_EXPANSION_OUTER_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
						  MLX5_EXPANSION_OUTER_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_OUTER_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV4_UDP,
			 MLX5_EXPANSION_OUTER_IPV4_TCP,
			 MLX5_EXPANSION_GRE,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV4_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_OUTER_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT
			(MLX5_EXPANSION_OUTER_IPV6_UDP,
			 MLX5_EXPANSION_OUTER_IPV6_TCP,
			 MLX5_EXPANSION_IPV4,
			 MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_OUTER_IPV6_UDP] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
						  MLX5_EXPANSION_VXLAN_GPE),
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_OUTER_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
	[MLX5_EXPANSION_VXLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
	},
	[MLX5_EXPANSION_VXLAN_GPE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
						  MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
	},
	[MLX5_EXPANSION_GRE] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
		.type = RTE_FLOW_ITEM_TYPE_GRE,
	},
	[MLX5_EXPANSION_MPLS] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_MPLS,
	},
	[MLX5_EXPANSION_ETH] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_ETH_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
		.type = RTE_FLOW_ITEM_TYPE_ETH,
	},
	[MLX5_EXPANSION_VLAN] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
						  MLX5_EXPANSION_IPV6),
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
	},
	[MLX5_EXPANSION_IPV4] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
						  MLX5_EXPANSION_IPV4_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			ETH_RSS_NONFRAG_IPV4_OTHER,
	},
	[MLX5_EXPANSION_IPV4_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
	},
	[MLX5_EXPANSION_IPV4_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
	},
	[MLX5_EXPANSION_IPV6] = {
		.next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
						  MLX5_EXPANSION_IPV6_TCP),
		.type = RTE_FLOW_ITEM_TYPE_IPV6,
		.rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER,
	},
	[MLX5_EXPANSION_IPV6_UDP] = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
	},
	[MLX5_EXPANSION_IPV6_TCP] = {
		.type = RTE_FLOW_ITEM_TYPE_TCP,
		.rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
	},
};
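
/*
 * Reading the graph above: starting from MLX5_EXPANSION_ROOT, one possible
 * path is ETH -> IPV4 -> IPV4_UDP, which corresponds to the expanded
 * pattern ETH / IPV4 / UDP and is selected when the requested RSS types
 * include ETH_RSS_NONFRAG_IPV4_UDP.
 */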

static struct rte_flow_shared_action *
mlx5_shared_action_create(struct rte_eth_dev *dev,
			  const struct rte_flow_shared_action_conf *conf,
			  const struct rte_flow_action *action,
			  struct rte_flow_error *error);
static int mlx5_shared_action_destroy
				(struct rte_eth_dev *dev,
				 struct rte_flow_shared_action *shared_action,
				 struct rte_flow_error *error);
static int mlx5_shared_action_update
				(struct rte_eth_dev *dev,
				 struct rte_flow_shared_action *shared_action,
				 const struct rte_flow_action *action,
				 struct rte_flow_error *error);
static int mlx5_shared_action_query
				(struct rte_eth_dev *dev,
				 const struct rte_flow_shared_action *action,
				 void *data,
				 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
		    struct rte_flow_tunnel *app_tunnel,
		    struct rte_flow_action **actions,
		    uint32_t *num_of_actions,
		    struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err);

static const struct rte_flow_ops mlx5_flow_ops = {
	.validate = mlx5_flow_validate,
	.create = mlx5_flow_create,
	.destroy = mlx5_flow_destroy,
	.flush = mlx5_flow_flush,
	.isolate = mlx5_flow_isolate,
	.query = mlx5_flow_query,
	.dev_dump = mlx5_flow_dev_dump,
	.get_aged_flows = mlx5_flow_get_aged_flows,
	.shared_action_create = mlx5_shared_action_create,
	.shared_action_destroy = mlx5_shared_action_destroy,
	.shared_action_update = mlx5_shared_action_update,
	.shared_action_query = mlx5_shared_action_query,
	.tunnel_decap_set = mlx5_flow_tunnel_decap_set,
	.tunnel_match = mlx5_flow_tunnel_match,
	.tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
	.tunnel_item_release = mlx5_flow_tunnel_item_release,
	.get_restore_info = mlx5_flow_tunnel_get_restore_info,
};
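
/*
 * These callbacks are not called directly; applications go through the
 * generic rte_flow API, which dispatches to this table for mlx5 ports.
 * A minimal usage sketch (application side, error handling omitted):
 *
 *   struct rte_flow_error error;
 *   struct rte_flow *flow;
 *
 *   flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
 *   ...
 *   rte_flow_destroy(port_id, flow, &error);
 *
 * On an mlx5 port, rte_flow_create() ends up in mlx5_flow_create()
 * registered above.
 */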

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
	uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
	uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GENEVE,
		.ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
		.ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GRE,
		.ptype = RTE_PTYPE_TUNNEL_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_MPLS,
		.ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_NVGRE,
		.ptype = RTE_PTYPE_TUNNEL_NVGRE,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPIP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
		.ptype = RTE_PTYPE_TUNNEL_IP,
	},
	{
		.tunnel = MLX5_FLOW_LAYER_GTP,
		.ptype = RTE_PTYPE_TUNNEL_GTPU,
	},
};


/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
		     enum mlx5_feature_name feature,
		     uint32_t id,
		     struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;
	enum modify_reg start_reg;
	bool skip_mtr_reg = false;

	switch (feature) {
	case MLX5_HAIRPIN_RX:
		return REG_B;
	case MLX5_HAIRPIN_TX:
		return REG_A;
	case MLX5_METADATA_RX:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_B;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_METADATA_TX:
		return REG_A;
	case MLX5_METADATA_FDB:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_0;
		case MLX5_XMETA_MODE_META32:
			return REG_C_1;
		}
		break;
	case MLX5_FLOW_MARK:
		switch (config->dv_xmeta_en) {
		case MLX5_XMETA_MODE_LEGACY:
			return REG_NON;
		case MLX5_XMETA_MODE_META16:
			return REG_C_1;
		case MLX5_XMETA_MODE_META32:
			return REG_C_0;
		}
		break;
	case MLX5_MTR_SFX:
		/*
		 * If meter color and flow match share one register, flow match
		 * should use the meter color register for match.
		 */
		if (priv->mtr_reg_share)
			return priv->mtr_color_reg;
		else
			return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			       REG_C_3;
	case MLX5_MTR_COLOR:
	case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
		MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
		return priv->mtr_color_reg;
	case MLX5_COPY_MARK:
		/*
		 * The metadata COPY_MARK register is used in the meter suffix
		 * sub-flow when a meter is present; it is safe to share the
		 * same register there.
		 */
		return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
	case MLX5_APP_TAG:
		/*
		 * If the meter is enabled, it engages registers for color
		 * match and flow match. If the meter color match does not use
		 * REG_C_2, the REG_C_x used by the meter color match must be
		 * skipped.
		 * If the meter is disabled, all available registers can be
		 * used.
		 */
		start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
			    (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
		skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
		if (id > (uint32_t)(REG_C_7 - start_reg))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "invalid tag id");
		if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		/*
		 * This case means the meter is using a REG_C_x greater than
		 * REG_C_2. Take care not to conflict with the meter color
		 * register: if the available index REG_C_y >= REG_C_x, skip
		 * the color register.
		 */
		if (skip_mtr_reg && config->flow_mreg_c
		    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
			if (id >= (uint32_t)(REG_C_7 - start_reg))
				return rte_flow_error_set(error, EINVAL,
						       RTE_FLOW_ERROR_TYPE_ITEM,
							NULL, "invalid tag id");
			if (config->flow_mreg_c
			    [id + 1 + start_reg - REG_C_0] != REG_NON)
				return config->flow_mreg_c
					       [id + 1 + start_reg - REG_C_0];
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "unsupported tag id");
		}
		return config->flow_mreg_c[id + start_reg - REG_C_0];
	}
	MLX5_ASSERT(false);
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, "invalid feature name");
}
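
/*
 * For instance, with dv_xmeta_en == MLX5_XMETA_MODE_META16 the switch above
 * resolves a MLX5_METADATA_RX request to REG_C_0 and a MLX5_FLOW_MARK
 * request to REG_C_1.
 */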

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_dev_config *config = &priv->config;

	/*
	 * Having an available reg_c can be regarded inclusively as supporting
	 * extensive flow metadata registers, which means:
	 * - metadata register copy action by modify header.
	 * - 16 modify header actions are supported.
	 * - reg_c's are preserved across different domains (FDB and NIC) on
	 *   packet loopback by flow lookup miss.
	 */
	return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			  const uint8_t *mask,
			  const uint8_t *nic_mask,
			  unsigned int size,
			  bool range_accepted,
			  struct rte_flow_error *error)
{
	unsigned int i;

	MLX5_ASSERT(nic_mask);
	for (i = 0; i < size; ++i)
		if ((nic_mask[i] | mask[i]) != nic_mask[i])
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "mask enables non supported"
						  " bits");
	if (!item->spec && (item->mask || item->last))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "mask/last without a spec is not"
					  " supported");
	if (item->spec && item->last && !range_accepted) {
		uint8_t spec[size];
		uint8_t last[size];
		unsigned int i;
		int ret;

		for (i = 0; i < size; ++i) {
			spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
			last[i] = ((const uint8_t *)item->last)[i] & mask[i];
		}
		ret = memcmp(spec, last, size);
		if (ret != 0)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  item,
						  "range is not valid");
	}
	return 0;
}
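
/*
 * Typical use from an item validator (a sketch; the nic_mask is whatever the
 * caller supports, here the default ETH mask for illustration, and the
 * caller substitutes a default mask when item->mask is NULL):
 *
 *   ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
 *                                   (const uint8_t *)&rte_flow_item_eth_mask,
 *                                   sizeof(struct rte_flow_item_eth),
 *                                   false, error);
 */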

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the RSS descriptor of the flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
			    int tunnel __rte_unused, uint64_t layer_types,
			    uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
	int rss_request_inner = rss_desc->level >= 2;

	/* Check RSS hash level for tunnel. */
	if (tunnel && rss_request_inner)
		hash_fields |= IBV_RX_HASH_INNER;
	else if (tunnel || rss_request_inner)
		return 0;
#endif
	/* Check if requested layer matches RSS hash fields. */
	if (!(rss_desc->types & layer_types))
		return 0;
	return hash_fields;
}
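
/*
 * Example: when built with HAVE_IBV_DEVICE_TUNNEL_SUPPORT, a call with
 * tunnel == 1 and rss_desc->level >= 2 OR-s IBV_RX_HASH_INNER into the hash
 * fields, while tunnel == 1 with level < 2 returns 0 so the tunnel-specific
 * hash is skipped.
 */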

/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single
 * ptype can be used; if several tunnel rules are used on this queue, the
 * tunnel ptype will be cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
	unsigned int i;
	uint32_t tunnel_ptype = 0;

	/* Look up for the ptype to use. */
	for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
		if (!rxq_ctrl->flow_tunnels_n[i])
			continue;
		if (!tunnel_ptype) {
			tunnel_ptype = tunnels_info[i].ptype;
		} else {
			tunnel_ptype = 0;
			break;
		}
	}
	rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
		       struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		/*
		 * To support metadata register copy on Tx loopback,
		 * this must always be enabled (metadata may arrive
		 * from another port, not only from local flows).
		 */
		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n++;
		}
		if (tunnel) {
			unsigned int j;

			/* Increase the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]++;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
			struct mlx5_flow_handle *dev_handle)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	const int mark = dev_handle->mark;
	const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
	struct mlx5_ind_table_obj *ind_tbl = NULL;
	unsigned int i;

	if (dev_handle->fate_action == MLX5_FLOW_FATE_QUEUE) {
		struct mlx5_hrxq *hrxq;

		hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
			      dev_handle->rix_hrxq);
		if (hrxq)
			ind_tbl = hrxq->ind_table;
	} else if (dev_handle->fate_action == MLX5_FLOW_FATE_SHARED_RSS) {
		struct mlx5_shared_action_rss *shared_rss;

		shared_rss = mlx5_ipool_get
			(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
			 dev_handle->rix_srss);
		if (shared_rss)
			ind_tbl = shared_rss->ind_tbl;
	}
	if (!ind_tbl)
		return;
	MLX5_ASSERT(dev->data->dev_started);
	for (i = 0; i != ind_tbl->queues_n; ++i) {
		int idx = ind_tbl->queues[i];
		struct mlx5_rxq_ctrl *rxq_ctrl =
			container_of((*priv->rxqs)[idx],
				     struct mlx5_rxq_ctrl, rxq);

		if (priv->config.dv_flow_en &&
		    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
		    mlx5_flow_ext_mreg_supported(dev)) {
			rxq_ctrl->rxq.mark = 1;
			rxq_ctrl->flow_mark_n = 1;
		} else if (mark) {
			rxq_ctrl->flow_mark_n--;
			rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
		}
		if (tunnel) {
			unsigned int j;

			/* Decrease the counter matching the flow. */
			for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
				if ((tunnels_info[j].tunnel &
				     dev_handle->layers) ==
				    tunnels_info[j].tunnel) {
					rxq_ctrl->flow_tunnels_n[j]--;
					break;
				}
			}
			flow_rxq_tunnel_ptype_update(rxq_ctrl);
		}
	}
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	uint32_t handle_idx;
	struct mlx5_flow_handle *dev_handle;

	SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
		       handle_idx, dev_handle, next)
		flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		struct mlx5_rxq_ctrl *rxq_ctrl;
		unsigned int j;

		if (!(*priv->rxqs)[i])
			continue;
		rxq_ctrl = container_of((*priv->rxqs)[i],
					struct mlx5_rxq_ctrl, rxq);
		rxq_ctrl->flow_mark_n = 0;
		rxq_ctrl->rxq.mark = 0;
		for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
			rxq_ctrl->flow_tunnels_n[j] = 0;
		rxq_ctrl->rxq.tunnel = 0;
	}
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_rxq_data *data;
	unsigned int i;

	for (i = 0; i != priv->rxqs_n; ++i) {
		if (!(*priv->rxqs)[i])
			continue;
		data = (*priv->rxqs)[i];
		if (!rte_flow_dynf_metadata_avail()) {
			data->dynf_meta = 0;
			data->flow_meta_mask = 0;
			data->flow_meta_offset = -1;
		} else {
			data->dynf_meta = 1;
			data->flow_meta_mask = rte_flow_dynf_metadata_mask;
			data->flow_meta_offset = rte_flow_dynf_metadata_offs;
		}
	}
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
		      enum rte_flow_action_type action)
{
	if (actions == NULL)
		return NULL;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		if (actions->type == action)
			return actions;
	return NULL;
}
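
/*
 * Usage example: locate the RSS action in an application-provided list,
 * e.g. when a flow has to be split:
 *
 *   const struct rte_flow_action *rss =
 *           mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 *   if (rss)
 *           ... inspect rss->conf ...
 */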

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-field that holds the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
			       const struct rte_flow_attr *attr,
			       struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_MARK)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't mark and flag in same flow");
	if (action_flags & MLX5_FLOW_ACTION_FLAG)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have 2 flag"
					  " actions in same flow");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
					  "flag action not supported for "
					  "egress");
	return 0;
}
/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
1291  * @param[in] attr
1292  *   Attributes of flow that includes this action.
1293  * @param[out] error
1294  *   Pointer to error structure.
1295  *
1296  * @return
1297  *   0 on success, a negative errno value otherwise and rte_errno is set.
1298  */
1299 int
1300 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1301                                uint64_t action_flags,
1302                                const struct rte_flow_attr *attr,
1303                                struct rte_flow_error *error)
1304 {
1305         const struct rte_flow_action_mark *mark = action->conf;
1306
1307         if (!mark)
1308                 return rte_flow_error_set(error, EINVAL,
1309                                           RTE_FLOW_ERROR_TYPE_ACTION,
1310                                           action,
1311                                           "configuration cannot be null");
1312         if (mark->id >= MLX5_FLOW_MARK_MAX)
1313                 return rte_flow_error_set(error, EINVAL,
1314                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1315                                           &mark->id,
                                          "mark id must be in range 0 <= id < "
1317                                           RTE_STR(MLX5_FLOW_MARK_MAX));
1318         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1319                 return rte_flow_error_set(error, EINVAL,
1320                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1321                                           "can't flag and mark in same flow");
1322         if (action_flags & MLX5_FLOW_ACTION_MARK)
1323                 return rte_flow_error_set(error, EINVAL,
1324                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1325                                           "can't have 2 mark actions in same"
1326                                           " flow");
1327         if (attr->egress)
1328                 return rte_flow_error_set(error, ENOTSUP,
1329                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1330                                           "mark action not supported for "
1331                                           "egress");
1332         return 0;
1333 }
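
/*
 * Editorial sketch, not part of the driver: how a caller might walk an
 * action list with the helpers above, accumulating action_flags so that
 * conflicting combinations (e.g. FLAG and MARK) are rejected. The helper
 * name is hypothetical.
 */
static __rte_unused int
example_validate_flag_mark(const struct rte_flow_action *actions,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        uint64_t action_flags = 0;
        int ret;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        break;
                default:
                        break;
                }
        }
        return 0;
}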
1334
/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
1340  * @param[in] attr
1341  *   Attributes of flow that includes this action.
1342  * @param[out] error
1343  *   Pointer to error structure.
1344  *
1345  * @return
1346  *   0 on success, a negative errno value otherwise and rte_errno is set.
1347  */
1348 int
1349 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
1350                                const struct rte_flow_attr *attr,
1351                                struct rte_flow_error *error)
1352 {
1353         if (attr->egress)
1354                 return rte_flow_error_set(error, ENOTSUP,
1355                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1356                                           "drop action not supported for "
1357                                           "egress");
1358         return 0;
1359 }
1360
/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
1368  * @param[in] dev
1369  *   Pointer to the Ethernet device structure.
1370  * @param[in] attr
1371  *   Attributes of flow that includes this action.
1372  * @param[out] error
1373  *   Pointer to error structure.
1374  *
1375  * @return
1376  *   0 on success, a negative errno value otherwise and rte_errno is set.
1377  */
1378 int
1379 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
1380                                 uint64_t action_flags,
1381                                 struct rte_eth_dev *dev,
1382                                 const struct rte_flow_attr *attr,
1383                                 struct rte_flow_error *error)
1384 {
1385         struct mlx5_priv *priv = dev->data->dev_private;
1386         const struct rte_flow_action_queue *queue = action->conf;
1387
1388         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1389                 return rte_flow_error_set(error, EINVAL,
1390                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1391                                           "can't have 2 fate actions in"
1392                                           " same flow");
1393         if (!priv->rxqs_n)
1394                 return rte_flow_error_set(error, EINVAL,
1395                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1396                                           NULL, "No Rx queues configured");
1397         if (queue->index >= priv->rxqs_n)
1398                 return rte_flow_error_set(error, EINVAL,
1399                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1400                                           &queue->index,
1401                                           "queue index out of range");
1402         if (!(*priv->rxqs)[queue->index])
1403                 return rte_flow_error_set(error, EINVAL,
1404                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1405                                           &queue->index,
1406                                           "queue is not configured");
1407         if (attr->egress)
1408                 return rte_flow_error_set(error, ENOTSUP,
1409                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1410                                           "queue action not supported for "
1411                                           "egress");
1412         return 0;
1413 }
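
/*
 * Editorial example, not part of the driver: a QUEUE action that can pass
 * the checks above must reference a configured Rx queue of an ingress
 * flow. The queue index below is illustrative.
 */
static __rte_unused void
example_queue_action(void)
{
        static const struct rte_flow_action_queue queue_conf = { .index = 0 };
        struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &queue_conf,
        };

        (void)action; /* Would be passed to rte_flow_validate()/create(). */
}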
1414
/**
 * Validate the RSS action configuration against the device capabilities.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
1422  * @param[out] error
1423  *   Pointer to error structure.
1424  *
1425  * @return
1426  *   0 on success, a negative errno value otherwise and rte_errno is set.
1427  */
1428 int
1429 mlx5_validate_action_rss(struct rte_eth_dev *dev,
1430                          const struct rte_flow_action *action,
1431                          struct rte_flow_error *error)
1432 {
1433         struct mlx5_priv *priv = dev->data->dev_private;
1434         const struct rte_flow_action_rss *rss = action->conf;
1435         enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
1436         unsigned int i;
1437
1438         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1439             rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1440                 return rte_flow_error_set(error, ENOTSUP,
1441                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1442                                           &rss->func,
1443                                           "RSS hash function not supported");
1444 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1445         if (rss->level > 2)
1446 #else
1447         if (rss->level > 1)
1448 #endif
1449                 return rte_flow_error_set(error, ENOTSUP,
1450                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1451                                           &rss->level,
1452                                           "tunnel RSS is not supported");
        /* Allow RSS key_len 0 in case of NULL (default) RSS key. */
1454         if (rss->key_len == 0 && rss->key != NULL)
1455                 return rte_flow_error_set(error, ENOTSUP,
1456                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1457                                           &rss->key_len,
1458                                           "RSS hash key length 0");
1459         if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1460                 return rte_flow_error_set(error, ENOTSUP,
1461                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1462                                           &rss->key_len,
1463                                           "RSS hash key too small");
1464         if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1465                 return rte_flow_error_set(error, ENOTSUP,
1466                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1467                                           &rss->key_len,
1468                                           "RSS hash key too large");
1469         if (rss->queue_num > priv->config.ind_table_max_size)
1470                 return rte_flow_error_set(error, ENOTSUP,
1471                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1472                                           &rss->queue_num,
1473                                           "number of queues too large");
1474         if (rss->types & MLX5_RSS_HF_MASK)
1475                 return rte_flow_error_set(error, ENOTSUP,
1476                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1477                                           &rss->types,
1478                                           "some RSS protocols are not"
1479                                           " supported");
1480         if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
1481             !(rss->types & ETH_RSS_IP))
1482                 return rte_flow_error_set(error, EINVAL,
1483                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1484                                           "L3 partial RSS requested but L3 RSS"
1485                                           " type not specified");
1486         if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
1487             !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
1488                 return rte_flow_error_set(error, EINVAL,
1489                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1490                                           "L4 partial RSS requested but L4 RSS"
1491                                           " type not specified");
1492         if (!priv->rxqs_n)
1493                 return rte_flow_error_set(error, EINVAL,
1494                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1495                                           NULL, "No Rx queues configured");
1496         if (!rss->queue_num)
1497                 return rte_flow_error_set(error, EINVAL,
1498                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1499                                           NULL, "No queues configured");
1500         for (i = 0; i != rss->queue_num; ++i) {
1501                 struct mlx5_rxq_ctrl *rxq_ctrl;
1502
1503                 if (rss->queue[i] >= priv->rxqs_n)
1504                         return rte_flow_error_set
1505                                 (error, EINVAL,
1506                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1507                                  &rss->queue[i], "queue index out of range");
1508                 if (!(*priv->rxqs)[rss->queue[i]])
1509                         return rte_flow_error_set
1510                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1511                                  &rss->queue[i], "queue is not configured");
1512                 rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
1513                                         struct mlx5_rxq_ctrl, rxq);
1514                 if (i == 0)
1515                         rxq_type = rxq_ctrl->type;
1516                 if (rxq_type != rxq_ctrl->type)
1517                         return rte_flow_error_set
1518                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1519                                  &rss->queue[i],
1520                                  "combining hairpin and regular RSS queues is not supported");
1521         }
1522         return 0;
1523 }
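
/*
 * Editorial example, not part of the driver: an RSS action configuration
 * that satisfies the checks above: Toeplitz hashing, outer level, a NULL
 * (default) key with key_len 0, supported protocol types, and a non-empty
 * queue list. Queue indices are illustrative.
 */
static __rte_unused void
example_rss_action_conf(void)
{
        static const uint16_t queues[] = { 0, 1 };
        static const struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .level = 1,                     /* Outer headers only. */
                .types = ETH_RSS_IP | ETH_RSS_UDP,
                .key_len = 0,                   /* 0 + NULL key = default key. */
                .key = NULL,
                .queue_num = RTE_DIM(queues),
                .queue = queues,
        };

        (void)rss;
}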
1524
/**
 * Validate the RSS action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
1532  * @param[in] dev
1533  *   Pointer to the Ethernet device structure.
1534  * @param[in] attr
1535  *   Attributes of flow that includes this action.
1536  * @param[in] item_flags
1537  *   Items that were detected.
1538  * @param[out] error
1539  *   Pointer to error structure.
1540  *
1541  * @return
1542  *   0 on success, a negative errno value otherwise and rte_errno is set.
1543  */
1544 int
1545 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
1546                               uint64_t action_flags,
1547                               struct rte_eth_dev *dev,
1548                               const struct rte_flow_attr *attr,
1549                               uint64_t item_flags,
1550                               struct rte_flow_error *error)
1551 {
1552         const struct rte_flow_action_rss *rss = action->conf;
1553         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1554         int ret;
1555
1556         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1557                 return rte_flow_error_set(error, EINVAL,
1558                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1559                                           "can't have 2 fate actions"
1560                                           " in same flow");
1561         ret = mlx5_validate_action_rss(dev, action, error);
1562         if (ret)
1563                 return ret;
1564         if (attr->egress)
1565                 return rte_flow_error_set(error, ENOTSUP,
1566                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1567                                           "rss action not supported for "
1568                                           "egress");
1569         if (rss->level > 1 && !tunnel)
1570                 return rte_flow_error_set(error, EINVAL,
1571                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1572                                           "inner RSS is not supported for "
1573                                           "non-tunnel flows");
1574         if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
1575             !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
1576                 return rte_flow_error_set(error, EINVAL,
1577                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1578                                           "RSS on eCPRI is not supported now");
1579         }
1580         return 0;
1581 }
1582
/**
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
1590  *
1591  * @return
1592  *   0 on success, a negative errno value otherwise and rte_errno is set.
1593  */
1594 int
1595 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
1596                                 const struct rte_flow_attr *attr,
1597                                 struct rte_flow_error *error)
1598 {
1599         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1600                 return rte_flow_error_set(error, EINVAL,
1601                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1602                                           "can't have 2 fate actions in"
1603                                           " same flow");
1604         if (attr->egress)
1605                 return rte_flow_error_set(error, ENOTSUP,
1606                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1607                                           "default miss action not supported "
1608                                           "for egress");
1609         if (attr->group)
1610                 return rte_flow_error_set(error, ENOTSUP,
1611                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1612                                           "only group 0 is supported");
1613         if (attr->transfer)
1614                 return rte_flow_error_set(error, ENOTSUP,
1615                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1616                                           NULL, "transfer is not supported");
1617         return 0;
1618 }
1619
/**
1621  * Validate the count action.
1622  *
1623  * @param[in] dev
1624  *   Pointer to the Ethernet device structure.
1625  * @param[in] attr
1626  *   Attributes of flow that includes this action.
1627  * @param[out] error
1628  *   Pointer to error structure.
1629  *
1630  * @return
1631  *   0 on success, a negative errno value otherwise and rte_errno is set.
1632  */
1633 int
1634 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1635                                 const struct rte_flow_attr *attr,
1636                                 struct rte_flow_error *error)
1637 {
1638         if (attr->egress)
1639                 return rte_flow_error_set(error, ENOTSUP,
1640                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1641                                           "count action not supported for "
1642                                           "egress");
1643         return 0;
1644 }
1645
1646 /**
 * Verify that the @p attributes will be correctly understood by the NIC.
1649  *
1650  * @param[in] dev
1651  *   Pointer to the Ethernet device structure.
1652  * @param[in] attributes
1653  *   Pointer to flow attributes
1654  * @param[out] error
1655  *   Pointer to error structure.
1656  *
1657  * @return
1658  *   0 on success, a negative errno value otherwise and rte_errno is set.
1659  */
1660 int
1661 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1662                               const struct rte_flow_attr *attributes,
1663                               struct rte_flow_error *error)
1664 {
1665         struct mlx5_priv *priv = dev->data->dev_private;
1666         uint32_t priority_max = priv->config.flow_prio - 1;
1667
1668         if (attributes->group)
1669                 return rte_flow_error_set(error, ENOTSUP,
1670                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL, "groups are not supported");
1672         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1673             attributes->priority >= priority_max)
1674                 return rte_flow_error_set(error, ENOTSUP,
1675                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1676                                           NULL, "priority out of range");
1677         if (attributes->egress)
1678                 return rte_flow_error_set(error, ENOTSUP,
1679                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1680                                           "egress is not supported");
1681         if (attributes->transfer && !priv->config.dv_esw_en)
1682                 return rte_flow_error_set(error, ENOTSUP,
1683                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1684                                           NULL, "transfer is not supported");
1685         if (!attributes->ingress)
1686                 return rte_flow_error_set(error, EINVAL,
1687                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1688                                           NULL,
1689                                           "ingress attribute is mandatory");
1690         return 0;
1691 }
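
/*
 * Editorial example, not part of the driver: attributes accepted by the
 * check above are ingress-only, group 0, with a priority below the
 * configured maximum (or the MLX5_FLOW_PRIO_RSVD marker).
 */
static __rte_unused void
example_flow_attr(void)
{
        static const struct rte_flow_attr attr = {
                .group = 0,
                .priority = 0,
                .ingress = 1,
        };

        (void)attr; /* Would be passed to mlx5_flow_validate_attributes(). */
}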
1692
1693 /**
1694  * Validate ICMP6 item.
1695  *
1696  * @param[in] item
1697  *   Item specification.
1698  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1702  * @param[out] error
1703  *   Pointer to error structure.
1704  *
1705  * @return
1706  *   0 on success, a negative errno value otherwise and rte_errno is set.
1707  */
1708 int
1709 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1710                                uint64_t item_flags,
1711                                uint8_t target_protocol,
1712                                struct rte_flow_error *error)
1713 {
1714         const struct rte_flow_item_icmp6 *mask = item->mask;
1715         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1716         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1717                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1718         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1719                                       MLX5_FLOW_LAYER_OUTER_L4;
1720         int ret;
1721
1722         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1723                 return rte_flow_error_set(error, EINVAL,
1724                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1725                                           "protocol filtering not compatible"
1726                                           " with ICMP6 layer");
1727         if (!(item_flags & l3m))
1728                 return rte_flow_error_set(error, EINVAL,
1729                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1730                                           "IPv6 is mandatory to filter on"
1731                                           " ICMP6");
1732         if (item_flags & l4m)
1733                 return rte_flow_error_set(error, EINVAL,
1734                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1735                                           "multiple L4 layers not supported");
1736         if (!mask)
1737                 mask = &rte_flow_item_icmp6_mask;
1738         ret = mlx5_flow_item_acceptable
1739                 (item, (const uint8_t *)mask,
1740                  (const uint8_t *)&rte_flow_item_icmp6_mask,
1741                  sizeof(struct rte_flow_item_icmp6),
1742                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1743         if (ret < 0)
1744                 return ret;
1745         return 0;
1746 }
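
/*
 * Editorial example, not part of the driver: a pattern satisfying the
 * ICMP6 prerequisites above: an outer IPv6 layer precedes the ICMP6 item
 * and no other L4 item is present.
 */
static __rte_unused void
example_icmp6_pattern(void)
{
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV6 },
                { .type = RTE_FLOW_ITEM_TYPE_ICMP6 },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        (void)pattern;
}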
1747
1748 /**
1749  * Validate ICMP item.
1750  *
1751  * @param[in] item
1752  *   Item specification.
1753  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
1756  *   Pointer to error structure.
1757  *
1758  * @return
1759  *   0 on success, a negative errno value otherwise and rte_errno is set.
1760  */
1761 int
1762 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1763                              uint64_t item_flags,
1764                              uint8_t target_protocol,
1765                              struct rte_flow_error *error)
1766 {
1767         const struct rte_flow_item_icmp *mask = item->mask;
1768         const struct rte_flow_item_icmp nic_mask = {
1769                 .hdr.icmp_type = 0xff,
1770                 .hdr.icmp_code = 0xff,
1771                 .hdr.icmp_ident = RTE_BE16(0xffff),
1772                 .hdr.icmp_seq_nb = RTE_BE16(0xffff),
1773         };
1774         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1775         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1776                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1777         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1778                                       MLX5_FLOW_LAYER_OUTER_L4;
1779         int ret;
1780
1781         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1782                 return rte_flow_error_set(error, EINVAL,
1783                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1784                                           "protocol filtering not compatible"
1785                                           " with ICMP layer");
1786         if (!(item_flags & l3m))
1787                 return rte_flow_error_set(error, EINVAL,
1788                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1789                                           "IPv4 is mandatory to filter"
1790                                           " on ICMP");
1791         if (item_flags & l4m)
1792                 return rte_flow_error_set(error, EINVAL,
1793                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1794                                           "multiple L4 layers not supported");
1795         if (!mask)
1796                 mask = &nic_mask;
1797         ret = mlx5_flow_item_acceptable
1798                 (item, (const uint8_t *)mask,
1799                  (const uint8_t *)&nic_mask,
1800                  sizeof(struct rte_flow_item_icmp),
1801                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1802         if (ret < 0)
1803                 return ret;
1804         return 0;
1805 }
1806
1807 /**
1808  * Validate Ethernet item.
1809  *
1810  * @param[in] item
1811  *   Item specification.
1812  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
 * @param[out] error
1815  *   Pointer to error structure.
1816  *
1817  * @return
1818  *   0 on success, a negative errno value otherwise and rte_errno is set.
1819  */
1820 int
1821 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1822                             uint64_t item_flags, bool ext_vlan_sup,
1823                             struct rte_flow_error *error)
1824 {
1825         const struct rte_flow_item_eth *mask = item->mask;
1826         const struct rte_flow_item_eth nic_mask = {
1827                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1828                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1829                 .type = RTE_BE16(0xffff),
1830                 .has_vlan = ext_vlan_sup ? 1 : 0,
1831         };
1832         int ret;
1833         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1834         const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1835                                        MLX5_FLOW_LAYER_OUTER_L2;
1836
1837         if (item_flags & ethm)
1838                 return rte_flow_error_set(error, ENOTSUP,
1839                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1840                                           "multiple L2 layers not supported");
1841         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1842             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1843                 return rte_flow_error_set(error, EINVAL,
1844                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1845                                           "L2 layer should not follow "
1846                                           "L3 layers");
1847         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1848             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1849                 return rte_flow_error_set(error, EINVAL,
1850                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1851                                           "L2 layer should not follow VLAN");
1852         if (!mask)
1853                 mask = &rte_flow_item_eth_mask;
1854         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1855                                         (const uint8_t *)&nic_mask,
1856                                         sizeof(struct rte_flow_item_eth),
1857                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1858         return ret;
1859 }
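
/*
 * Editorial example, not part of the driver: an Ethernet item matching a
 * single destination MAC. The mask is a subset of the nic_mask above and
 * is therefore acceptable; the address is illustrative.
 */
static __rte_unused void
example_eth_item(void)
{
        static const struct rte_flow_item_eth spec = {
                .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        };
        static const struct rte_flow_item_eth mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &spec,
                .mask = &mask,
        };

        (void)item;
}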
1860
1861 /**
1862  * Validate VLAN item.
1863  *
1864  * @param[in] item
1865  *   Item specification.
1866  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1868  * @param[in] dev
1869  *   Ethernet device flow is being created on.
1870  * @param[out] error
1871  *   Pointer to error structure.
1872  *
1873  * @return
1874  *   0 on success, a negative errno value otherwise and rte_errno is set.
1875  */
1876 int
1877 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1878                              uint64_t item_flags,
1879                              struct rte_eth_dev *dev,
1880                              struct rte_flow_error *error)
1881 {
1882         const struct rte_flow_item_vlan *spec = item->spec;
1883         const struct rte_flow_item_vlan *mask = item->mask;
1884         const struct rte_flow_item_vlan nic_mask = {
1885                 .tci = RTE_BE16(UINT16_MAX),
1886                 .inner_type = RTE_BE16(UINT16_MAX),
1887         };
1888         uint16_t vlan_tag = 0;
1889         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1890         int ret;
1891         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1892                                         MLX5_FLOW_LAYER_INNER_L4) :
1893                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1894                                         MLX5_FLOW_LAYER_OUTER_L4);
1895         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1896                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1897
1898         if (item_flags & vlanm)
1899                 return rte_flow_error_set(error, EINVAL,
1900                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1901                                           "multiple VLAN layers not supported");
1902         else if ((item_flags & l34m) != 0)
1903                 return rte_flow_error_set(error, EINVAL,
1904                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1905                                           "VLAN cannot follow L3/L4 layer");
1906         if (!mask)
1907                 mask = &rte_flow_item_vlan_mask;
1908         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1909                                         (const uint8_t *)&nic_mask,
1910                                         sizeof(struct rte_flow_item_vlan),
1911                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1912         if (ret)
1913                 return ret;
1914         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1915                 struct mlx5_priv *priv = dev->data->dev_private;
1916
1917                 if (priv->vmwa_context) {
1918                         /*
1919                          * Non-NULL context means we have a virtual machine
1920                          * and SR-IOV enabled, we have to create VLAN interface
1921                          * to make hypervisor to setup E-Switch vport
1922                          * context correctly. We avoid creating the multiple
1923                          * VLAN interfaces, so we cannot support VLAN tag mask.
1924                          */
1925                         return rte_flow_error_set(error, EINVAL,
1926                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1927                                                   item,
1928                                                   "VLAN tag mask is not"
1929                                                   " supported in virtual"
1930                                                   " environment");
1931                 }
1932         }
1933         if (spec) {
1934                 vlan_tag = spec->tci;
1935                 vlan_tag &= mask->tci;
1936         }
1937         /*
1938          * From verbs perspective an empty VLAN is equivalent
1939          * to a packet without VLAN layer.
1940          */
1941         if (!vlan_tag)
1942                 return rte_flow_error_set(error, EINVAL,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1944                                           item->spec,
1945                                           "VLAN cannot be empty");
1946         return 0;
1947 }
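
/*
 * Editorial example, not part of the driver: a VLAN item carrying a
 * non-zero TCI, as required by the "VLAN cannot be empty" check above.
 * The 0x0fff mask restricts matching to the VID bits and also avoids the
 * VM workaround path; VLAN ID 100 is illustrative.
 */
static __rte_unused void
example_vlan_item(void)
{
        static const struct rte_flow_item_vlan spec = {
                .tci = RTE_BE16(100),           /* PCP/DEI zero, VID 100. */
        };
        static const struct rte_flow_item_vlan mask = {
                .tci = RTE_BE16(0x0fff),        /* Match the VID bits only. */
        };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .spec = &spec,
                .mask = &mask,
        };

        (void)item;
}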
1948
1949 /**
1950  * Validate IPV4 item.
1951  *
1952  * @param[in] item
1953  *   Item specification.
1954  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1956  * @param[in] last_item
1957  *   Previous validated item in the pattern items.
1958  * @param[in] ether_type
1959  *   Type in the ethernet layer header (including dot1q).
1960  * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
1962  *   will be used to check whether item fields are supported.
1963  * @param[in] range_accepted
1964  *   True if range of values is accepted for specific fields, false otherwise.
1965  * @param[out] error
1966  *   Pointer to error structure.
1967  *
1968  * @return
1969  *   0 on success, a negative errno value otherwise and rte_errno is set.
1970  */
1971 int
1972 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
1973                              uint64_t item_flags,
1974                              uint64_t last_item,
1975                              uint16_t ether_type,
1976                              const struct rte_flow_item_ipv4 *acc_mask,
1977                              bool range_accepted,
1978                              struct rte_flow_error *error)
1979 {
1980         const struct rte_flow_item_ipv4 *mask = item->mask;
1981         const struct rte_flow_item_ipv4 *spec = item->spec;
1982         const struct rte_flow_item_ipv4 nic_mask = {
1983                 .hdr = {
1984                         .src_addr = RTE_BE32(0xffffffff),
1985                         .dst_addr = RTE_BE32(0xffffffff),
1986                         .type_of_service = 0xff,
1987                         .next_proto_id = 0xff,
1988                 },
1989         };
1990         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1991         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1992                                       MLX5_FLOW_LAYER_OUTER_L3;
1993         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1994                                       MLX5_FLOW_LAYER_OUTER_L4;
1995         int ret;
1996         uint8_t next_proto = 0xFF;
1997         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
1998                                   MLX5_FLOW_LAYER_OUTER_VLAN |
1999                                   MLX5_FLOW_LAYER_INNER_VLAN);
2000
2001         if ((last_item & l2_vlan) && ether_type &&
2002             ether_type != RTE_ETHER_TYPE_IPV4)
2003                 return rte_flow_error_set(error, EINVAL,
2004                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2005                                           "IPv4 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv4");
2007         if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2008                 if (mask && spec)
2009                         next_proto = mask->hdr.next_proto_id &
2010                                      spec->hdr.next_proto_id;
2011                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2012                         return rte_flow_error_set(error, EINVAL,
2013                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2014                                                   item,
2015                                                   "multiple tunnel "
2016                                                   "not supported");
2017         }
2018         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2019                 return rte_flow_error_set(error, EINVAL,
2020                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2021                                           "wrong tunnel type - IPv6 specified "
2022                                           "but IPv4 item provided");
2023         if (item_flags & l3m)
2024                 return rte_flow_error_set(error, ENOTSUP,
2025                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2026                                           "multiple L3 layers not supported");
2027         else if (item_flags & l4m)
2028                 return rte_flow_error_set(error, EINVAL,
2029                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2030                                           "L3 cannot follow an L4 layer.");
2031         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2032                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2033                 return rte_flow_error_set(error, EINVAL,
2034                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2035                                           "L3 cannot follow an NVGRE layer.");
2036         if (!mask)
2037                 mask = &rte_flow_item_ipv4_mask;
2038         else if (mask->hdr.next_proto_id != 0 &&
2039                  mask->hdr.next_proto_id != 0xff)
2040                 return rte_flow_error_set(error, EINVAL,
2041                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2042                                           "partial mask is not supported"
2043                                           " for protocol");
2044         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2045                                         acc_mask ? (const uint8_t *)acc_mask
2046                                                  : (const uint8_t *)&nic_mask,
2047                                         sizeof(struct rte_flow_item_ipv4),
2048                                         range_accepted, error);
2049         if (ret < 0)
2050                 return ret;
2051         return 0;
2052 }
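
/*
 * Editorial example, not part of the driver: per the check above, the
 * next_proto_id mask must be either 0 or 0xff; a partial protocol mask is
 * rejected. This IPv4 item matches UDP packets to one destination address;
 * the address is illustrative.
 */
static __rte_unused void
example_ipv4_item(void)
{
        static const struct rte_flow_item_ipv4 spec = {
                .hdr = {
                        .dst_addr = RTE_BE32(0xc0a80101), /* 192.168.1.1 */
                        .next_proto_id = IPPROTO_UDP,
                },
        };
        static const struct rte_flow_item_ipv4 mask = {
                .hdr = {
                        .dst_addr = RTE_BE32(0xffffffff),
                        .next_proto_id = 0xff,  /* Full mask required. */
                },
        };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &spec,
                .mask = &mask,
        };

        (void)item;
}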
2053
2054 /**
2055  * Validate IPV6 item.
2056  *
2057  * @param[in] item
2058  *   Item specification.
2059  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
2061  * @param[in] last_item
2062  *   Previous validated item in the pattern items.
2063  * @param[in] ether_type
2064  *   Type in the ethernet layer header (including dot1q).
2065  * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
2067  *   will be used to check whether item fields are supported.
2068  * @param[out] error
2069  *   Pointer to error structure.
2070  *
2071  * @return
2072  *   0 on success, a negative errno value otherwise and rte_errno is set.
2073  */
2074 int
2075 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2076                              uint64_t item_flags,
2077                              uint64_t last_item,
2078                              uint16_t ether_type,
2079                              const struct rte_flow_item_ipv6 *acc_mask,
2080                              struct rte_flow_error *error)
2081 {
2082         const struct rte_flow_item_ipv6 *mask = item->mask;
2083         const struct rte_flow_item_ipv6 *spec = item->spec;
2084         const struct rte_flow_item_ipv6 nic_mask = {
2085                 .hdr = {
2086                         .src_addr =
2087                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2088                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2089                         .dst_addr =
2090                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2091                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2092                         .vtc_flow = RTE_BE32(0xffffffff),
2093                         .proto = 0xff,
2094                 },
2095         };
2096         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2097         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2098                                       MLX5_FLOW_LAYER_OUTER_L3;
2099         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2100                                       MLX5_FLOW_LAYER_OUTER_L4;
2101         int ret;
2102         uint8_t next_proto = 0xFF;
2103         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2104                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2105                                   MLX5_FLOW_LAYER_INNER_VLAN);
2106
2107         if ((last_item & l2_vlan) && ether_type &&
2108             ether_type != RTE_ETHER_TYPE_IPV6)
2109                 return rte_flow_error_set(error, EINVAL,
2110                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2111                                           "IPv6 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv6");
2113         if (mask && mask->hdr.proto == UINT8_MAX && spec)
2114                 next_proto = spec->hdr.proto;
2115         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2116                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2117                         return rte_flow_error_set(error, EINVAL,
2118                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2119                                                   item,
2120                                                   "multiple tunnel "
2121                                                   "not supported");
2122         }
2123         if (next_proto == IPPROTO_HOPOPTS  ||
2124             next_proto == IPPROTO_ROUTING  ||
2125             next_proto == IPPROTO_FRAGMENT ||
2126             next_proto == IPPROTO_ESP      ||
2127             next_proto == IPPROTO_AH       ||
2128             next_proto == IPPROTO_DSTOPTS)
2129                 return rte_flow_error_set(error, EINVAL,
2130                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2131                                           "IPv6 proto (next header) should "
2132                                           "not be set as extension header");
2133         if (item_flags & MLX5_FLOW_LAYER_IPIP)
2134                 return rte_flow_error_set(error, EINVAL,
2135                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2136                                           "wrong tunnel type - IPv4 specified "
2137                                           "but IPv6 item provided");
2138         if (item_flags & l3m)
2139                 return rte_flow_error_set(error, ENOTSUP,
2140                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2141                                           "multiple L3 layers not supported");
2142         else if (item_flags & l4m)
2143                 return rte_flow_error_set(error, EINVAL,
2144                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2145                                           "L3 cannot follow an L4 layer.");
2146         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2147                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2148                 return rte_flow_error_set(error, EINVAL,
2149                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2150                                           "L3 cannot follow an NVGRE layer.");
2151         if (!mask)
2152                 mask = &rte_flow_item_ipv6_mask;
2153         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2154                                         acc_mask ? (const uint8_t *)acc_mask
2155                                                  : (const uint8_t *)&nic_mask,
2156                                         sizeof(struct rte_flow_item_ipv6),
2157                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2158         if (ret < 0)
2159                 return ret;
2160         return 0;
2161 }
2162
2163 /**
2164  * Validate UDP item.
2165  *
2166  * @param[in] item
2167  *   Item specification.
2168  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
2174  * @param[out] error
2175  *   Pointer to error structure.
2176  *
2177  * @return
2178  *   0 on success, a negative errno value otherwise and rte_errno is set.
2179  */
2180 int
2181 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2182                             uint64_t item_flags,
2183                             uint8_t target_protocol,
2184                             struct rte_flow_error *error)
2185 {
2186         const struct rte_flow_item_udp *mask = item->mask;
2187         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2188         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2189                                       MLX5_FLOW_LAYER_OUTER_L3;
2190         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2191                                       MLX5_FLOW_LAYER_OUTER_L4;
2192         int ret;
2193
2194         if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2195                 return rte_flow_error_set(error, EINVAL,
2196                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2197                                           "protocol filtering not compatible"
2198                                           " with UDP layer");
2199         if (!(item_flags & l3m))
2200                 return rte_flow_error_set(error, EINVAL,
2201                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2202                                           "L3 is mandatory to filter on L4");
2203         if (item_flags & l4m)
2204                 return rte_flow_error_set(error, EINVAL,
2205                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2206                                           "multiple L4 layers not supported");
2207         if (!mask)
2208                 mask = &rte_flow_item_udp_mask;
2209         ret = mlx5_flow_item_acceptable
2210                 (item, (const uint8_t *)mask,
2211                  (const uint8_t *)&rte_flow_item_udp_mask,
2212                  sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2213                  error);
2214         if (ret < 0)
2215                 return ret;
2216         return 0;
2217 }
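
/*
 * Editorial example, not part of the driver: the UDP item requires a
 * preceding L3 item and forbids a second L4 item, so a minimal valid
 * pattern looks as follows. The port value is illustrative.
 */
static __rte_unused void
example_udp_pattern(void)
{
        static const struct rte_flow_item_udp udp_spec = {
                .hdr.dst_port = RTE_BE16(4789),
        };
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                {
                        .type = RTE_FLOW_ITEM_TYPE_UDP,
                        .spec = &udp_spec,
                        .mask = &rte_flow_item_udp_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        (void)pattern;
}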
2218
2219 /**
2220  * Validate TCP item.
2221  *
2222  * @param[in] item
2223  *   Item specification.
2224  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
2229  *   Pointer to error structure.
2230  *
2231  * @return
2232  *   0 on success, a negative errno value otherwise and rte_errno is set.
2233  */
2234 int
2235 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2236                             uint64_t item_flags,
2237                             uint8_t target_protocol,
2238                             const struct rte_flow_item_tcp *flow_mask,
2239                             struct rte_flow_error *error)
2240 {
2241         const struct rte_flow_item_tcp *mask = item->mask;
2242         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2243         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2244                                       MLX5_FLOW_LAYER_OUTER_L3;
2245         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2246                                       MLX5_FLOW_LAYER_OUTER_L4;
2247         int ret;
2248
2249         MLX5_ASSERT(flow_mask);
2250         if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2251                 return rte_flow_error_set(error, EINVAL,
2252                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2253                                           "protocol filtering not compatible"
2254                                           " with TCP layer");
2255         if (!(item_flags & l3m))
2256                 return rte_flow_error_set(error, EINVAL,
2257                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2258                                           "L3 is mandatory to filter on L4");
2259         if (item_flags & l4m)
2260                 return rte_flow_error_set(error, EINVAL,
2261                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2262                                           "multiple L4 layers not supported");
2263         if (!mask)
2264                 mask = &rte_flow_item_tcp_mask;
2265         ret = mlx5_flow_item_acceptable
2266                 (item, (const uint8_t *)mask,
2267                  (const uint8_t *)flow_mask,
2268                  sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2269                  error);
2270         if (ret < 0)
2271                 return ret;
2272         return 0;
2273 }
2274
2275 /**
2276  * Validate VXLAN item.
2277  *
2278  * @param[in] item
2279  *   Item specification.
2280  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
2284  * @param[out] error
2285  *   Pointer to error structure.
2286  *
2287  * @return
2288  *   0 on success, a negative errno value otherwise and rte_errno is set.
2289  */
2290 int
2291 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2292                               uint64_t item_flags,
2293                               struct rte_flow_error *error)
2294 {
2295         const struct rte_flow_item_vxlan *spec = item->spec;
2296         const struct rte_flow_item_vxlan *mask = item->mask;
2297         int ret;
2298         union vni {
2299                 uint32_t vlan_id;
2300                 uint8_t vni[4];
2301         } id = { .vlan_id = 0, };
2304         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2305                 return rte_flow_error_set(error, ENOTSUP,
2306                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2307                                           "multiple tunnel layers not"
2308                                           " supported");
2309         /*
2310          * Verify only UDPv4 is present as defined in
2311          * https://tools.ietf.org/html/rfc7348
2312          */
2313         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2314                 return rte_flow_error_set(error, EINVAL,
2315                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2316                                           "no outer UDP layer found");
2317         if (!mask)
2318                 mask = &rte_flow_item_vxlan_mask;
2319         ret = mlx5_flow_item_acceptable
2320                 (item, (const uint8_t *)mask,
2321                  (const uint8_t *)&rte_flow_item_vxlan_mask,
2322                  sizeof(struct rte_flow_item_vxlan),
2323                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2324         if (ret < 0)
2325                 return ret;
2326         if (spec) {
                memcpy(&id.vni[1], spec->vni, 3);
                /* Keep only the VNI bits covered by the mask. */
                id.vni[1] &= mask->vni[0];
                id.vni[2] &= mask->vni[1];
                id.vni[3] &= mask->vni[2];
2329         }
2330         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2331                 return rte_flow_error_set(error, ENOTSUP,
2332                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2333                                           "VXLAN tunnel must be fully defined");
2334         return 0;
2335 }
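
/*
 * Editorial example, not part of the driver: per the checks above, a VXLAN
 * item must be preceded by an outer UDP layer and the outer part of the
 * tunnel must be fully defined, e.g. ETH / IPV4 / UDP / VXLAN. VNI 42 is
 * illustrative.
 */
static __rte_unused void
example_vxlan_pattern(void)
{
        static const struct rte_flow_item_vxlan vxlan_spec = {
                .vni = { 0, 0, 42 },
        };
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                {
                        .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                        .spec = &vxlan_spec,
                        .mask = &rte_flow_item_vxlan_mask,
                },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };

        (void)pattern;
}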
2336
2337 /**
2338  * Validate VXLAN_GPE item.
2339  *
2340  * @param[in] item
2341  *   Item specification.
2342  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2348  * @param[out] error
2349  *   Pointer to error structure.
2350  *
2351  * @return
2352  *   0 on success, a negative errno value otherwise and rte_errno is set.
2353  */
2354 int
2355 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2356                                   uint64_t item_flags,
2357                                   struct rte_eth_dev *dev,
2358                                   struct rte_flow_error *error)
2359 {
2360         struct mlx5_priv *priv = dev->data->dev_private;
2361         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2362         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2363         int ret;
2364         union vni {
2365                 uint32_t vlan_id;
2366                 uint8_t vni[4];
2367         } id = { .vlan_id = 0, };
2368
2369         if (!priv->config.l3_vxlan_en)
2370                 return rte_flow_error_set(error, ENOTSUP,
2371                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2372                                           "L3 VXLAN is not enabled by device"
2373                                           " parameter and/or not configured in"
2374                                           " firmware");
2375         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2376                 return rte_flow_error_set(error, ENOTSUP,
2377                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2378                                           "multiple tunnel layers not"
2379                                           " supported");
2380         /*
2381          * Verify only UDPv4 is present as defined in
2382          * https://tools.ietf.org/html/rfc7348
2383          */
2384         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2385                 return rte_flow_error_set(error, EINVAL,
2386                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2387                                           "no outer UDP layer found");
2388         if (!mask)
2389                 mask = &rte_flow_item_vxlan_gpe_mask;
2390         ret = mlx5_flow_item_acceptable
2391                 (item, (const uint8_t *)mask,
2392                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2393                  sizeof(struct rte_flow_item_vxlan_gpe),
2394                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2395         if (ret < 0)
2396                 return ret;
2397         if (spec) {
2398                 if (spec->protocol)
2399                         return rte_flow_error_set(error, ENOTSUP,
2400                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2401                                                   item,
2402                                                   "VxLAN-GPE protocol"
2403                                                   " not supported");
                memcpy(&id.vni[1], spec->vni, 3);
                /* Keep only the VNI bits covered by the mask. */
                id.vni[1] &= mask->vni[0];
                id.vni[2] &= mask->vni[1];
                id.vni[3] &= mask->vni[2];
2406         }
2407         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2408                 return rte_flow_error_set(error, ENOTSUP,
2409                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2410                                           "VXLAN-GPE tunnel must be fully"
2411                                           " defined");
2412         return 0;
}

/**
2415  * Validate GRE Key item.
2416  *
2417  * @param[in] item
2418  *   Item specification.
2419  * @param[in] item_flags
2420  *   Bit flags to mark detected items.
2421  * @param[in] gre_item
 *   Pointer to the GRE item.
2423  * @param[out] error
2424  *   Pointer to error structure.
2425  *
2426  * @return
2427  *   0 on success, a negative errno value otherwise and rte_errno is set.
2428  */
2429 int
2430 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2431                                 uint64_t item_flags,
2432                                 const struct rte_flow_item *gre_item,
2433                                 struct rte_flow_error *error)
2434 {
2435         const rte_be32_t *mask = item->mask;
2436         int ret = 0;
2437         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2438         const struct rte_flow_item_gre *gre_spec;
2439         const struct rte_flow_item_gre *gre_mask;
2440
2441         if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2442                 return rte_flow_error_set(error, ENOTSUP,
2443                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple GRE keys not supported");
2445         if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2446                 return rte_flow_error_set(error, ENOTSUP,
2447                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2448                                           "No preceding GRE header");
2449         if (item_flags & MLX5_FLOW_LAYER_INNER)
2450                 return rte_flow_error_set(error, ENOTSUP,
2451                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2452                                           "GRE key cannot follow inner headers");
2453         gre_mask = gre_item->mask;
2454         if (!gre_mask)
2455                 gre_mask = &rte_flow_item_gre_mask;
2456         gre_spec = gre_item->spec;
2457         if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2458                          !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2459                 return rte_flow_error_set(error, EINVAL,
2460                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2461                                           "Key bit must be on");
2462
2463         if (!mask)
2464                 mask = &gre_key_default_mask;
2465         ret = mlx5_flow_item_acceptable
2466                 (item, (const uint8_t *)mask,
2467                  (const uint8_t *)&gre_key_default_mask,
2468                  sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2469         return ret;
2470 }
2471
2472 /**
2473  * Validate GRE item.
2474  *
2475  * @param[in] item
2476  *   Item specification.
2477  * @param[in] item_flags
2478  *   Bit flags to mark detected items.
2479  * @param[in] target_protocol
2480  *   The next protocol in the previous item.
2481  * @param[out] error
2482  *   Pointer to error structure.
2483  *
2484  * @return
2485  *   0 on success, a negative errno value otherwise and rte_errno is set.
2486  */
2487 int
2488 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2489                             uint64_t item_flags,
2490                             uint8_t target_protocol,
2491                             struct rte_flow_error *error)
2492 {
2493         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2494         const struct rte_flow_item_gre *mask = item->mask;
2495         int ret;
2496         const struct rte_flow_item_gre nic_mask = {
2497                 .c_rsvd0_ver = RTE_BE16(0xB000),
2498                 .protocol = RTE_BE16(UINT16_MAX),
2499         };
2500
2501         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2502                 return rte_flow_error_set(error, EINVAL,
2503                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2504                                           "protocol filtering not compatible"
2505                                           " with this GRE layer");
2506         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2507                 return rte_flow_error_set(error, ENOTSUP,
2508                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2509                                           "multiple tunnel layers not"
2510                                           " supported");
2511         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2512                 return rte_flow_error_set(error, ENOTSUP,
2513                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2514                                           "L3 Layer is missing");
2515         if (!mask)
2516                 mask = &rte_flow_item_gre_mask;
2517         ret = mlx5_flow_item_acceptable
2518                 (item, (const uint8_t *)mask,
2519                  (const uint8_t *)&nic_mask,
2520                  sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2521                  error);
2522         if (ret < 0)
2523                 return ret;
2524 #ifndef HAVE_MLX5DV_DR
2525 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2526         if (spec && (spec->protocol & mask->protocol))
2527                 return rte_flow_error_set(error, ENOTSUP,
2528                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2529                                           "without MPLS support the"
2530                                           " specification cannot be used for"
2531                                           " filtering");
2532 #endif
2533 #endif
2534         return 0;
2535 }
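
/*
 * Illustrative sketch (documentation only, never compiled): a pattern
 * accepted by the GRE and GRE key validators above. The GRE item sets the
 * K bit (0x2000 in c_rsvd0_ver) so that a GRE_KEY item may follow; the
 * GRE_KEY spec is a plain big-endian 32-bit value. Values are examples.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_gre example_gre_spec = {
        .c_rsvd0_ver = RTE_BE16(0x2000), /* K bit on */
        .protocol = RTE_BE16(RTE_ETHER_TYPE_IPV4),
};
static const rte_be32_t example_gre_key = RTE_BE32(0x0000002a);
static const struct rte_flow_item example_gre_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &example_gre_spec },
        { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &example_gre_key },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif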
2536
2537 /**
2538  * Validate Geneve item.
2539  *
2540  * @param[in] item
2541  *   Item specification.
2542  * @param[in] item_flags
2543  *   Bit-fields that hold the items detected until now.
2544  * @param[in] dev
2545  *   Pointer to the rte_eth_dev structure.
2546  * @param[out] error
2547  *   Pointer to error structure.
2548  *
2549  * @return
2550  *   0 on success, a negative errno value otherwise and rte_errno is set.
2551  */
2553 int
2554 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2555                                uint64_t item_flags,
2556                                struct rte_eth_dev *dev,
2557                                struct rte_flow_error *error)
2558 {
2559         struct mlx5_priv *priv = dev->data->dev_private;
2560         const struct rte_flow_item_geneve *spec = item->spec;
2561         const struct rte_flow_item_geneve *mask = item->mask;
2562         int ret;
2563         uint16_t gbhdr;
2564         uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2565                           MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2566         const struct rte_flow_item_geneve nic_mask = {
2567                 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2568                 .vni = "\xff\xff\xff",
2569                 .protocol = RTE_BE16(UINT16_MAX),
2570         };
2571
2572         if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2573                 return rte_flow_error_set(error, ENOTSUP,
2574                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2575                                           "L3 Geneve is not enabled by device"
2576                                           " parameter and/or not configured in"
2577                                           " firmware");
2578         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2579                 return rte_flow_error_set(error, ENOTSUP,
2580                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2581                                           "multiple tunnel layers not"
2582                                           " supported");
2583         /*
2584          * Verify an outer UDP header is present, as required by the
2585          * GENEVE specification (RFC 8926).
2586          */
2587         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2588                 return rte_flow_error_set(error, EINVAL,
2589                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2590                                           "no outer UDP layer found");
2591         if (!mask)
2592                 mask = &rte_flow_item_geneve_mask;
2593         ret = mlx5_flow_item_acceptable
2594                                   (item, (const uint8_t *)mask,
2595                                    (const uint8_t *)&nic_mask,
2596                                    sizeof(struct rte_flow_item_geneve),
2597                                    MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2598         if (ret)
2599                 return ret;
2600         if (spec) {
2601                 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2602                 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2603                      MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2604                      MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2605                         return rte_flow_error_set(error, ENOTSUP,
2606                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2607                                                   item,
2608                                                   "Geneve protocol unsupported"
2609                                                   " fields are being used");
2610                 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2611                         return rte_flow_error_set
2612                                         (error, ENOTSUP,
2613                                          RTE_FLOW_ERROR_TYPE_ITEM,
2614                                          item,
2615                                          "Unsupported Geneve options length");
2616         }
2617         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2618                 return rte_flow_error_set
2619                                     (error, ENOTSUP,
2620                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
2621                                      "Geneve tunnel must be fully defined");
2622         return 0;
2623 }
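
/*
 * Illustrative sketch (documentation only, never compiled): a GENEVE
 * pattern that passes the checks above -- outer UDP present, VNI-only
 * match, version/flags/reserved fields left zero. Values are examples.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_geneve example_geneve_spec = {
        .vni = "\x00\xab\xcd",
};
static const struct rte_flow_item example_geneve_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_UDP },
        { .type = RTE_FLOW_ITEM_TYPE_GENEVE, .spec = &example_geneve_spec },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif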
2624
2625 /**
2626  * Validate Geneve TLV option item.
2627  *
2628  * @param[in] item
2629  *   Item specification.
2630  * @param[in] last_item
2631  *   Previous validated item in the pattern items.
2632  * @param[in] geneve_item
2633  *   Previous GENEVE item specification.
2634  * @param[in] dev
2635  *   Pointer to the rte_eth_dev structure.
2636  * @param[out] error
2637  *   Pointer to error structure.
2638  *
2639  * @return
2640  *   0 on success, a negative errno value otherwise and rte_errno is set.
2641  */
2642 int
2643 mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
2644                                    uint64_t last_item,
2645                                    const struct rte_flow_item *geneve_item,
2646                                    struct rte_eth_dev *dev,
2647                                    struct rte_flow_error *error)
2648 {
2649         struct mlx5_priv *priv = dev->data->dev_private;
2650         struct mlx5_dev_ctx_shared *sh = priv->sh;
2651         struct mlx5_geneve_tlv_option_resource *geneve_opt_resource;
2652         struct mlx5_hca_attr *hca_attr = &priv->config.hca_attr;
2653         uint8_t data_max_supported =
2654                         hca_attr->max_geneve_tlv_option_data_len * 4;
2655         struct mlx5_dev_config *config = &priv->config;
2656         const struct rte_flow_item_geneve *geneve_spec;
2657         const struct rte_flow_item_geneve *geneve_mask;
2658         const struct rte_flow_item_geneve_opt *spec = item->spec;
2659         const struct rte_flow_item_geneve_opt *mask = item->mask;
2660         unsigned int i;
2661         unsigned int data_len;
2662         uint8_t tlv_option_len;
2663         uint16_t optlen_m, optlen_v;
2664         const struct rte_flow_item_geneve_opt full_mask = {
2665                 .option_class = RTE_BE16(0xffff),
2666                 .option_type = 0xff,
2667                 .option_len = 0x1f,
2668         };
2669
2670         if (!mask)
2671                 mask = &rte_flow_item_geneve_opt_mask;
2672         if (!spec)
2673                 return rte_flow_error_set
2674                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2675                         "Geneve TLV opt class/type/length must be specified");
2676         if ((uint32_t)spec->option_len > MLX5_GENEVE_OPTLEN_MASK)
2677                 return rte_flow_error_set
2678                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2679                         "Geneve TLV opt length exceeds the limit (31)");
2680         /* Check if class type and length masks are full. */
2681         if (full_mask.option_class != mask->option_class ||
2682             full_mask.option_type != mask->option_type ||
2683             full_mask.option_len != (mask->option_len & full_mask.option_len))
2684                 return rte_flow_error_set
2685                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2686                         "Geneve TLV opt class/type/length masks must be full");
2687         /* Check if length is supported */
2688         if ((uint32_t)spec->option_len >
2689                         config->hca_attr.max_geneve_tlv_option_data_len)
2690                 return rte_flow_error_set
2691                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2692                         "Geneve TLV opt length not supported");
2693         if (config->hca_attr.max_geneve_tlv_options > 1)
2694                 DRV_LOG(DEBUG,
2695                         "max_geneve_tlv_options supports more than 1 option");
2696         /* Check GENEVE item preceding. */
2697         if (!geneve_item || !(last_item & MLX5_FLOW_LAYER_GENEVE))
2698                 return rte_flow_error_set
2699                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2700                         "Geneve opt item must be preceded by a Geneve item");
2701         geneve_spec = geneve_item->spec;
2702         geneve_mask = geneve_item->mask ? geneve_item->mask :
2703                                           &rte_flow_item_geneve_mask;
2704         /* Check that the TLV option size does not exceed the GENEVE option length. */
2705         if (geneve_spec && (geneve_mask->ver_opt_len_o_c_rsvd0 ||
2706                             geneve_spec->ver_opt_len_o_c_rsvd0)) {
2707                 tlv_option_len = spec->option_len & mask->option_len;
2708                 optlen_v = rte_be_to_cpu_16(geneve_spec->ver_opt_len_o_c_rsvd0);
2709                 optlen_v = MLX5_GENEVE_OPTLEN_VAL(optlen_v);
2710                 optlen_m = rte_be_to_cpu_16(geneve_mask->ver_opt_len_o_c_rsvd0);
2711                 optlen_m = MLX5_GENEVE_OPTLEN_VAL(optlen_m);
2712                 if ((optlen_v & optlen_m) <= tlv_option_len)
2713                         return rte_flow_error_set
2714                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2715                                  "GENEVE TLV option length exceeds optlen");
2716         }
2717         /* Check if length is 0 or data is 0. */
2718         if (spec->data == NULL || spec->option_len == 0)
2719                 return rte_flow_error_set
2720                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2721                         "Geneve TLV opt with zero data/length not supported");
2722         /* Check not all data & mask are 0. */
2723         data_len = spec->option_len * 4;
2724         if (mask->data == NULL) {
2725                 for (i = 0; i < data_len; i++)
2726                         if (spec->data[i])
2727                                 break;
2728                 if (i == data_len)
2729                         return rte_flow_error_set(error, ENOTSUP,
2730                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2731                                 "Can't match on Geneve option data 0");
2732         } else {
2733                 for (i = 0; i < data_len; i++)
2734                         if (spec->data[i] & mask->data[i])
2735                                 break;
2736                 if (i == data_len)
2737                         return rte_flow_error_set(error, ENOTSUP,
2738                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2739                                 "Can't match on Geneve option data and mask 0");
2740                 /* Check data mask supported. */
2741                 for (i = data_max_supported; i < data_len ; i++)
2742                         if (mask->data[i])
2743                                 return rte_flow_error_set(error, ENOTSUP,
2744                                         RTE_FLOW_ERROR_TYPE_ITEM, item,
2745                                         "Data mask is of unsupported size");
2746         }
2747         /* Check GENEVE option is supported in NIC. */
2748         if (!config->hca_attr.geneve_tlv_opt)
2749                 return rte_flow_error_set
2750                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, item,
2751                         "Geneve TLV opt not supported");
2752         /* Check if we already have geneve option with different type/class. */
2753         rte_spinlock_lock(&sh->geneve_tlv_opt_sl);
2754         geneve_opt_resource = sh->geneve_tlv_option_resource;
2755         if (geneve_opt_resource != NULL)
2756                 if (geneve_opt_resource->option_class != spec->option_class ||
2757                     geneve_opt_resource->option_type != spec->option_type ||
2758                     geneve_opt_resource->length != spec->option_len) {
2759                         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2760                         return rte_flow_error_set(error, ENOTSUP,
2761                                 RTE_FLOW_ERROR_TYPE_ITEM, item,
2762                                 "Only one Geneve TLV option supported");
2763                 }
2764         rte_spinlock_unlock(&sh->geneve_tlv_opt_sl);
2765         return 0;
2766 }
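
/*
 * Illustrative sketch (documentation only, never compiled): a GENEVE TLV
 * option spec/mask pair shaped to satisfy the validator above --
 * class/type/length masks are full and the data mask is non-zero.
 * option_len counts 4-byte words of option data; the data arrays here are
 * sized generously to cover the scan performed by the validator. All
 * values are made-up examples.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static uint32_t example_geneve_opt_data[4] = { RTE_BE32(0x11223344), };
static uint32_t example_geneve_opt_dmask[4] = { RTE_BE32(0xffffffff), };
static const struct rte_flow_item_geneve_opt example_geneve_opt_spec = {
        .option_class = RTE_BE16(0x0102),
        .option_type = 0x03,
        .option_len = 1, /* one 4-byte data word */
        .data = example_geneve_opt_data,
};
static const struct rte_flow_item_geneve_opt example_geneve_opt_mask = {
        .option_class = RTE_BE16(0xffff),
        .option_type = 0xff,
        .option_len = 0x1f,
        .data = example_geneve_opt_dmask,
};
#endif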
2767
2768 /**
2769  * Validate MPLS item.
2770  *
2771  * @param[in] dev
2772  *   Pointer to the rte_eth_dev structure.
2773  * @param[in] item
2774  *   Item specification.
2775  * @param[in] item_flags
2776  *   Bit-fields that hold the items detected until now.
2777  * @param[in] prev_layer
2778  *   The protocol layer indicated in previous item.
2779  * @param[out] error
2780  *   Pointer to error structure.
2781  *
2782  * @return
2783  *   0 on success, a negative errno value otherwise and rte_errno is set.
2784  */
2785 int
2786 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2787                              const struct rte_flow_item *item __rte_unused,
2788                              uint64_t item_flags __rte_unused,
2789                              uint64_t prev_layer __rte_unused,
2790                              struct rte_flow_error *error)
2791 {
2792 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2793         const struct rte_flow_item_mpls *mask = item->mask;
2794         struct mlx5_priv *priv = dev->data->dev_private;
2795         int ret;
2796
2797         if (!priv->config.mpls_en)
2798                 return rte_flow_error_set(error, ENOTSUP,
2799                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2800                                           "MPLS not supported or"
2801                                           " disabled in firmware"
2802                                           " configuration.");
2803         /* MPLS over IP, UDP, GRE is allowed */
2804         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2805                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
2806                             MLX5_FLOW_LAYER_GRE |
2807                             MLX5_FLOW_LAYER_GRE_KEY)))
2808                 return rte_flow_error_set(error, EINVAL,
2809                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2810                                           "protocol filtering not compatible"
2811                                           " with MPLS layer");
2812         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2813         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2814             !(item_flags & MLX5_FLOW_LAYER_GRE))
2815                 return rte_flow_error_set(error, ENOTSUP,
2816                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2817                                           "multiple tunnel layers not"
2818                                           " supported");
2819         if (!mask)
2820                 mask = &rte_flow_item_mpls_mask;
2821         ret = mlx5_flow_item_acceptable
2822                 (item, (const uint8_t *)mask,
2823                  (const uint8_t *)&rte_flow_item_mpls_mask,
2824                  sizeof(struct rte_flow_item_mpls),
2825                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2826         if (ret < 0)
2827                 return ret;
2828         return 0;
2829 #else
2830         return rte_flow_error_set(error, ENOTSUP,
2831                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
2832                                   "MPLS is not supported by Verbs, please"
2833                                   " update.");
2834 #endif
2835 }
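
/*
 * Illustrative sketch (documentation only, never compiled): an
 * MPLS-over-GRE pattern, one of the encapsulations allowed above.
 * label_tc_s packs a 20-bit label, a 3-bit TC field and the S
 * (bottom-of-stack) bit; the bytes below encode label 16 with the S bit
 * set. Values are examples only.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static const struct rte_flow_item_mpls example_mpls_spec = {
        .label_tc_s = "\x00\x01\x01",
};
static const struct rte_flow_item example_mpls_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
        { .type = RTE_FLOW_ITEM_TYPE_GRE },
        { .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &example_mpls_spec },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif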
2836
2837 /**
2838  * Validate NVGRE item.
2839  *
2840  * @param[in] item
2841  *   Item specification.
2842  * @param[in] item_flags
2843  *   Bit flags to mark detected items.
2844  * @param[in] target_protocol
2845  *   The next protocol in the previous item.
2846  * @param[out] error
2847  *   Pointer to error structure.
2848  *
2849  * @return
2850  *   0 on success, a negative errno value otherwise and rte_errno is set.
2851  */
2852 int
2853 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2854                               uint64_t item_flags,
2855                               uint8_t target_protocol,
2856                               struct rte_flow_error *error)
2857 {
2858         const struct rte_flow_item_nvgre *mask = item->mask;
2859         int ret;
2860
2861         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2862                 return rte_flow_error_set(error, EINVAL,
2863                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2864                                           "protocol filtering not compatible"
2865                                           " with this GRE layer");
2866         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2867                 return rte_flow_error_set(error, ENOTSUP,
2868                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2869                                           "multiple tunnel layers not"
2870                                           " supported");
2871         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2872                 return rte_flow_error_set(error, ENOTSUP,
2873                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2874                                           "L3 Layer is missing");
2875         if (!mask)
2876                 mask = &rte_flow_item_nvgre_mask;
2877         ret = mlx5_flow_item_acceptable
2878                 (item, (const uint8_t *)mask,
2879                  (const uint8_t *)&rte_flow_item_nvgre_mask,
2880                  sizeof(struct rte_flow_item_nvgre),
2881                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2882         if (ret < 0)
2883                 return ret;
2884         return 0;
2885 }
2886
2887 /**
2888  * Validate eCPRI item.
2889  *
2890  * @param[in] item
2891  *   Item specification.
2892  * @param[in] item_flags
2893  *   Bit-fields that hold the items detected until now.
2894  * @param[in] last_item
2895  *   Previous validated item in the pattern items.
2896  * @param[in] ether_type
2897  *   Type in the ethernet layer header (including dot1q).
2898  * @param[in] acc_mask
2899  *   Acceptable mask, if NULL the default internal mask
2900  *   will be used to check whether item fields are supported.
2901  * @param[out] error
2902  *   Pointer to error structure.
2903  *
2904  * @return
2905  *   0 on success, a negative errno value otherwise and rte_errno is set.
2906  */
2907 int
2908 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2909                               uint64_t item_flags,
2910                               uint64_t last_item,
2911                               uint16_t ether_type,
2912                               const struct rte_flow_item_ecpri *acc_mask,
2913                               struct rte_flow_error *error)
2914 {
2915         const struct rte_flow_item_ecpri *mask = item->mask;
2916         const struct rte_flow_item_ecpri nic_mask = {
2917                 .hdr = {
2918                         .common = {
2919                                 .u32 =
2920                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
2921                                         .type = 0xFF,
2922                                         }).u32),
2923                         },
2924                         .dummy[0] = 0xFFFFFFFF,
2925                 },
2926         };
2927         const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2928                                         MLX5_FLOW_LAYER_OUTER_VLAN);
2929         struct rte_flow_item_ecpri mask_lo;
2930
2931         if (!(last_item & outer_l2_vlan) &&
2932             last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
2933                 return rte_flow_error_set(error, EINVAL,
2934                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2935                                           "eCPRI can only follow L2/VLAN layer or UDP layer");
2936         if ((last_item & outer_l2_vlan) && ether_type &&
2937             ether_type != RTE_ETHER_TYPE_ECPRI)
2938                 return rte_flow_error_set(error, EINVAL,
2939                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2940                                           "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
2941         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2942                 return rte_flow_error_set(error, EINVAL,
2943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2944                                           "eCPRI with tunnel is not supported right now");
2945         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2946                 return rte_flow_error_set(error, ENOTSUP,
2947                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2948                                           "multiple L3 layers not supported");
2949         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2950                 return rte_flow_error_set(error, EINVAL,
2951                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2952                                           "eCPRI cannot coexist with a TCP layer");
2953         /* In specification, eCPRI could be over UDP layer. */
2954         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2955                 return rte_flow_error_set(error, EINVAL,
2956                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2957                                           "eCPRI over UDP layer is not supported yet");
2958         /* Mask for type field in common header could be zero. */
2959         if (!mask)
2960                 mask = &rte_flow_item_ecpri_mask;
2961         mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
2962         /* Input mask is in big-endian format. */
2963         if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
2964                 return rte_flow_error_set(error, EINVAL,
2965                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2966                                           "partial mask is not supported for protocol");
2967         else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
2968                 return rte_flow_error_set(error, EINVAL,
2969                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2970                                           "message header mask requires a type mask to be set");
2971         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2972                                          acc_mask ? (const uint8_t *)acc_mask
2973                                                   : (const uint8_t *)&nic_mask,
2974                                          sizeof(struct rte_flow_item_ecpri),
2975                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2976 }
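
/*
 * Documentation-only sketch, compiled out by a macro that is never
 * defined: a pattern with eCPRI directly following L2, matching on the
 * message type only. It reuses the initialization idiom of nic_mask
 * above; the message type value is a made-up example.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline void
example_build_ecpri_pattern(struct rte_flow_item items[3],
                            struct rte_flow_item_ecpri *spec)
{
        *spec = (struct rte_flow_item_ecpri){
                .hdr = {
                        .common = {
                                .u32 =
                                RTE_BE32(((const struct rte_ecpri_common_hdr) {
                                        .type = 0x05, /* example type */
                                        }).u32),
                        },
                },
        };
        /* The ETH item is expected to carry ether type 0xAEFE. */
        items[0] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_ETH };
        items[1] = (struct rte_flow_item){
                .type = RTE_FLOW_ITEM_TYPE_ECPRI, .spec = spec };
        items[2] = (struct rte_flow_item){ .type = RTE_FLOW_ITEM_TYPE_END };
}
#endif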
2977
2978 /**
2979  * Release resources related to the QUEUE/RSS action split.
2980  *
2981  * @param dev
2982  *   Pointer to Ethernet device.
2983  * @param flow
2984  *   Flow to release IDs from.
2985  */
2986 static void
2987 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2988                              struct rte_flow *flow)
2989 {
2990         struct mlx5_priv *priv = dev->data->dev_private;
2991         uint32_t handle_idx;
2992         struct mlx5_flow_handle *dev_handle;
2993
2994         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2995                        handle_idx, dev_handle, next)
2996                 if (dev_handle->split_flow_id)
2997                         mlx5_ipool_free(priv->sh->ipool
2998                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
2999                                         dev_handle->split_flow_id);
3000 }
3001
3002 static int
3003 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
3004                    const struct rte_flow_attr *attr __rte_unused,
3005                    const struct rte_flow_item items[] __rte_unused,
3006                    const struct rte_flow_action actions[] __rte_unused,
3007                    bool external __rte_unused,
3008                    int hairpin __rte_unused,
3009                    struct rte_flow_error *error)
3010 {
3011         return rte_flow_error_set(error, ENOTSUP,
3012                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3013 }
3014
3015 static struct mlx5_flow *
3016 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
3017                   const struct rte_flow_attr *attr __rte_unused,
3018                   const struct rte_flow_item items[] __rte_unused,
3019                   const struct rte_flow_action actions[] __rte_unused,
3020                   struct rte_flow_error *error)
3021 {
3022         rte_flow_error_set(error, ENOTSUP,
3023                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3024         return NULL;
3025 }
3026
3027 static int
3028 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
3029                     struct mlx5_flow *dev_flow __rte_unused,
3030                     const struct rte_flow_attr *attr __rte_unused,
3031                     const struct rte_flow_item items[] __rte_unused,
3032                     const struct rte_flow_action actions[] __rte_unused,
3033                     struct rte_flow_error *error)
3034 {
3035         return rte_flow_error_set(error, ENOTSUP,
3036                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3037 }
3038
3039 static int
3040 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3041                 struct rte_flow *flow __rte_unused,
3042                 struct rte_flow_error *error)
3043 {
3044         return rte_flow_error_set(error, ENOTSUP,
3045                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3046 }
3047
3048 static void
3049 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3050                  struct rte_flow *flow __rte_unused)
3051 {
3052 }
3053
3054 static void
3055 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3056                   struct rte_flow *flow __rte_unused)
3057 {
3058 }
3059
3060 static int
3061 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3062                 struct rte_flow *flow __rte_unused,
3063                 const struct rte_flow_action *actions __rte_unused,
3064                 void *data __rte_unused,
3065                 struct rte_flow_error *error)
3066 {
3067         return rte_flow_error_set(error, ENOTSUP,
3068                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3069 }
3070
3071 static int
3072 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3073                       uint32_t domains __rte_unused,
3074                       uint32_t flags __rte_unused)
3075 {
3076         return 0;
3077 }
3078
3079 /* Void driver to protect from null pointer reference. */
3080 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3081         .validate = flow_null_validate,
3082         .prepare = flow_null_prepare,
3083         .translate = flow_null_translate,
3084         .apply = flow_null_apply,
3085         .remove = flow_null_remove,
3086         .destroy = flow_null_destroy,
3087         .query = flow_null_query,
3088         .sync_domain = flow_null_sync_domain,
3089 };
3090
3091 /**
3092  * Select flow driver type according to flow attributes and device
3093  * configuration.
3094  *
3095  * @param[in] dev
3096  *   Pointer to the dev structure.
3097  * @param[in] attr
3098  *   Pointer to the flow attributes.
3099  *
3100  * @return
3101  *   The selected flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3102  */
3103 static enum mlx5_flow_drv_type
3104 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3105 {
3106         struct mlx5_priv *priv = dev->data->dev_private;
3107         /* The OS can first determine a specific flow type (DV, VERBS). */
3108         enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3109
3110         if (type != MLX5_FLOW_TYPE_MAX)
3111                 return type;
3112         /* If no OS specific type - continue with DV/VERBS selection */
3113         if (attr->transfer && priv->config.dv_esw_en)
3114                 type = MLX5_FLOW_TYPE_DV;
3115         if (!attr->transfer)
3116                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3117                                                  MLX5_FLOW_TYPE_VERBS;
3118         return type;
3119 }
3120
3121 #define flow_get_drv_ops(type) flow_drv_ops[type]
3122
3123 /**
3124  * Flow driver validation API. This abstracts calling driver specific functions.
3125  * The type of flow driver is determined according to flow attributes.
3126  *
3127  * @param[in] dev
3128  *   Pointer to the dev structure.
3129  * @param[in] attr
3130  *   Pointer to the flow attributes.
3131  * @param[in] items
3132  *   Pointer to the list of items.
3133  * @param[in] actions
3134  *   Pointer to the list of actions.
3135  * @param[in] external
3136  *   This flow rule is created by a request external to the PMD.
3137  * @param[in] hairpin
3138  *   Number of hairpin TX actions, 0 means classic flow.
3139  * @param[out] error
3140  *   Pointer to the error structure.
3141  *
3142  * @return
3143  *   0 on success, a negative errno value otherwise and rte_errno is set.
3144  */
3145 static inline int
3146 flow_drv_validate(struct rte_eth_dev *dev,
3147                   const struct rte_flow_attr *attr,
3148                   const struct rte_flow_item items[],
3149                   const struct rte_flow_action actions[],
3150                   bool external, int hairpin, struct rte_flow_error *error)
3151 {
3152         const struct mlx5_flow_driver_ops *fops;
3153         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3154
3155         fops = flow_get_drv_ops(type);
3156         return fops->validate(dev, attr, items, actions, external,
3157                               hairpin, error);
3158 }
3159
3160 /**
3161  * Flow driver preparation API. This abstracts calling driver specific
3162  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3163  * calculates the size of memory required for device flow, allocates the memory,
3164  * initializes the device flow and returns the pointer.
3165  *
3166  * @note
3167  *   This function initializes the device flow structure, such as dv or verbs
3168  *   in struct mlx5_flow. However, it is the caller's responsibility to
3169  *   initialize the rest. For example, adding the returned device flow to the
3170  *   flow->dev_flow list and setting the backward reference to the flow should
3171  *   be done outside of this function. The layers field is not filled either.
3172  *
3173  * @param[in] dev
3174  *   Pointer to the dev structure.
3175  * @param[in] attr
3176  *   Pointer to the flow attributes.
3177  * @param[in] items
3178  *   Pointer to the list of items.
3179  * @param[in] actions
3180  *   Pointer to the list of actions.
3181  * @param[in] flow_idx
3182  *   Memory pool index of this flow.
3183  * @param[out] error
3184  *   Pointer to the error structure.
3185  *
3186  * @return
3187  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3188  */
3189 static inline struct mlx5_flow *
3190 flow_drv_prepare(struct rte_eth_dev *dev,
3191                  const struct rte_flow *flow,
3192                  const struct rte_flow_attr *attr,
3193                  const struct rte_flow_item items[],
3194                  const struct rte_flow_action actions[],
3195                  uint32_t flow_idx,
3196                  struct rte_flow_error *error)
3197 {
3198         const struct mlx5_flow_driver_ops *fops;
3199         enum mlx5_flow_drv_type type = flow->drv_type;
3200         struct mlx5_flow *mlx5_flow = NULL;
3201
3202         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3203         fops = flow_get_drv_ops(type);
3204         mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3205         if (mlx5_flow)
3206                 mlx5_flow->flow_idx = flow_idx;
3207         return mlx5_flow;
3208 }
3209
3210 /**
3211  * Flow driver translation API. This abstracts calling driver specific
3212  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3213  * translates a generic flow into a driver flow. flow_drv_prepare() must
3214  * precede.
3215  *
3216  * @note
3217  *   dev_flow->layers could be filled as a result of parsing during translation
3218  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3219  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3220  *   flow->actions could be overwritten even though all the expanded dev_flows
3221  *   have the same actions.
3222  *
3223  * @param[in] dev
3224  *   Pointer to the rte dev structure.
3225  * @param[in, out] dev_flow
3226  *   Pointer to the mlx5 flow.
3227  * @param[in] attr
3228  *   Pointer to the flow attributes.
3229  * @param[in] items
3230  *   Pointer to the list of items.
3231  * @param[in] actions
3232  *   Pointer to the list of actions.
3233  * @param[out] error
3234  *   Pointer to the error structure.
3235  *
3236  * @return
3237  *   0 on success, a negative errno value otherwise and rte_errno is set.
3238  */
3239 static inline int
3240 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3241                    const struct rte_flow_attr *attr,
3242                    const struct rte_flow_item items[],
3243                    const struct rte_flow_action actions[],
3244                    struct rte_flow_error *error)
3245 {
3246         const struct mlx5_flow_driver_ops *fops;
3247         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3248
3249         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3250         fops = flow_get_drv_ops(type);
3251         return fops->translate(dev, dev_flow, attr, items, actions, error);
3252 }
3253
3254 /**
3255  * Flow driver apply API. This abstracts calling driver specific functions.
3256  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3257  * translated driver flows on to device. flow_drv_translate() must precede.
3258  *
3259  * @param[in] dev
3260  *   Pointer to Ethernet device structure.
3261  * @param[in, out] flow
3262  *   Pointer to flow structure.
3263  * @param[out] error
3264  *   Pointer to error structure.
3265  *
3266  * @return
3267  *   0 on success, a negative errno value otherwise and rte_errno is set.
3268  */
3269 static inline int
3270 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3271                struct rte_flow_error *error)
3272 {
3273         const struct mlx5_flow_driver_ops *fops;
3274         enum mlx5_flow_drv_type type = flow->drv_type;
3275
3276         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3277         fops = flow_get_drv_ops(type);
3278         return fops->apply(dev, flow, error);
3279 }
3280
3281 /**
3282  * Flow driver destroy API. This abstracts calling driver specific functions.
3283  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3284  * on device and releases resources of the flow.
3285  *
3286  * @param[in] dev
3287  *   Pointer to Ethernet device.
3288  * @param[in, out] flow
3289  *   Pointer to flow structure.
3290  */
3291 static inline void
3292 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3293 {
3294         const struct mlx5_flow_driver_ops *fops;
3295         enum mlx5_flow_drv_type type = flow->drv_type;
3296
3297         flow_mreg_split_qrss_release(dev, flow);
3298         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3299         fops = flow_get_drv_ops(type);
3300         fops->destroy(dev, flow);
3301 }
3302
3303 /**
3304  * Get RSS action from the action list.
3305  *
3306  * @param[in] actions
3307  *   Pointer to the list of actions.
3308  *
3309  * @return
3310  *   Pointer to the RSS action if it exists, otherwise NULL.
3311  */
3312 static const struct rte_flow_action_rss*
3313 flow_get_rss_action(const struct rte_flow_action actions[])
3314 {
3315         const struct rte_flow_action_rss *rss = NULL;
3316
3317         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3318                 switch (actions->type) {
3319                 case RTE_FLOW_ACTION_TYPE_RSS:
3320                         rss = actions->conf;
3321                         break;
3322                 case RTE_FLOW_ACTION_TYPE_SAMPLE:
3323                 {
3324                         const struct rte_flow_action_sample *sample =
3325                                                                 actions->conf;
3326                         const struct rte_flow_action *act = sample->actions;
3327                         for (; act->type != RTE_FLOW_ACTION_TYPE_END; act++)
3328                                 if (act->type == RTE_FLOW_ACTION_TYPE_RSS)
3329                                         rss = act->conf;
3330                         break;
3331                 }
3332                 default:
3333                         break;
3334                 }
3335         }
3336         return rss;
3337 }
3338
3339 /**
3340  * Get ASO age action by index.
3341  *
3342  * @param[in] dev
3343  *   Pointer to the Ethernet device structure.
3344  * @param[in] age_idx
3345  *   Index to the ASO age action.
3346  *
3347  * @return
3348  *   The specified ASO age action.
3349  */
3350 struct mlx5_aso_age_action*
3351 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
3352 {
3353         uint16_t pool_idx = age_idx & UINT16_MAX;
3354         uint16_t offset = (age_idx >> 16) & UINT16_MAX;
3355         struct mlx5_priv *priv = dev->data->dev_private;
3356         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
3357         struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
3358
3359         return &pool->actions[offset - 1];
3360 }
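
/*
 * Documentation-only sketch (never compiled) of the inverse of the lookup
 * above, assuming the encoding implied by flow_aso_age_get_by_idx(): the
 * pool index lives in the low 16 bits and a 1-based action offset in the
 * high 16 bits.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline uint32_t
example_aso_age_make_idx(uint16_t pool_idx, uint16_t action_offset)
{
        /* action_offset is zero-based and assumed below UINT16_MAX. */
        return (((uint32_t)action_offset + 1) << 16) | pool_idx;
}
#endif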
3361
3362 /* Maps a shared action to its translated non-shared action in an actions array. */
3363 struct mlx5_translated_shared_action {
3364         struct rte_flow_shared_action *action; /**< Shared action */
3365         int index; /**< Index in related array of rte_flow_action */
3366 };
3367
3368 /**
3369  * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to their related
3370  * non-shared actions if translation is possible.
3371  * This functionality is used to run the same execution path for both shared
3372  * and non-shared actions on flow create. All necessary preparations for
3373  * shared action handling should be performed on the *shared* actions list
3374  * returned from this call.
3375  *
3376  * @param[in] dev
3377  *   Pointer to Ethernet device.
3378  * @param[in] actions
3379  *   List of actions to translate.
3380  * @param[out] shared
3381  *   List to store translated shared actions.
3382  * @param[in, out] shared_n
3383  *   Size of *shared* array. On return should be updated with number of shared
3384  *   actions retrieved from the *actions* list.
3385  * @param[out] translated_actions
3386  *   List of actions where all shared actions were translated to non shared
3387  *   if possible. NULL if no translation took place.
3388  * @param[out] error
3389  *   Pointer to the error structure.
3390  *
3391  * @return
3392  *   0 on success, a negative errno value otherwise and rte_errno is set.
3393  */
3394 static int
3395 flow_shared_actions_translate(struct rte_eth_dev *dev,
3396                               const struct rte_flow_action actions[],
3397                               struct mlx5_translated_shared_action *shared,
3398                               int *shared_n,
3399                               struct rte_flow_action **translated_actions,
3400                               struct rte_flow_error *error)
3401 {
3402         struct mlx5_priv *priv = dev->data->dev_private;
3403         struct rte_flow_action *translated = NULL;
3404         size_t actions_size;
3405         int n;
3406         int copied_n = 0;
3407         struct mlx5_translated_shared_action *shared_end = NULL;
3408
3409         for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3410                 if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
3411                         continue;
3412                 if (copied_n == *shared_n) {
3413                         return rte_flow_error_set
3414                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3415                                  NULL, "too many shared actions");
3416                 }
3417                 rte_memcpy(&shared[copied_n].action, &actions[n].conf,
3418                            sizeof(actions[n].conf));
3419                 shared[copied_n].index = n;
3420                 copied_n++;
3421         }
3422         n++;
3423         *shared_n = copied_n;
3424         if (!copied_n)
3425                 return 0;
3426         actions_size = sizeof(struct rte_flow_action) * n;
3427         translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3428         if (!translated) {
3429                 rte_errno = ENOMEM;
3430                 return -ENOMEM;
3431         }
3432         memcpy(translated, actions, actions_size);
3433         for (shared_end = shared + copied_n; shared < shared_end; shared++) {
3434                 struct mlx5_shared_action_rss *shared_rss;
3435                 uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
3436                 uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
3437                 uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET)
3438                                                                            - 1);
3439
3440                 switch (type) {
3441                 case MLX5_SHARED_ACTION_TYPE_RSS:
3442                         shared_rss = mlx5_ipool_get
3443                           (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3444                         translated[shared->index].type =
3445                                 RTE_FLOW_ACTION_TYPE_RSS;
3446                         translated[shared->index].conf =
3447                                 &shared_rss->origin;
3448                         break;
3449                 case MLX5_SHARED_ACTION_TYPE_AGE:
3450                         if (priv->sh->flow_hit_aso_en) {
3451                                 translated[shared->index].type =
3452                                         (enum rte_flow_action_type)
3453                                         MLX5_RTE_FLOW_ACTION_TYPE_AGE;
3454                                 translated[shared->index].conf =
3455                                                          (void *)(uintptr_t)idx;
3456                                 break;
3457                         }
3458                         /* Fall-through */
3459                 default:
3460                         mlx5_free(translated);
3461                         return rte_flow_error_set
3462                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3463                                  NULL, "invalid shared action type");
3464                 }
3465         }
3466         *translated_actions = translated;
3467         return 0;
3468 }
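
/*
 * Documentation-only sketch (never compiled): the shared-action handle is
 * an opaque pointer carrying a packed type/index pair, decoded the same
 * way as in the translation loop above.
 */
#ifdef MLX5_FLOW_DOC_EXAMPLES
static inline void
example_shared_action_decode(const struct rte_flow_shared_action *handle,
                             uint32_t *type, uint32_t *idx)
{
        uint32_t act_idx = (uint32_t)(uintptr_t)handle;

        *type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
        *idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
}
#endif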
3469
3470 /**
3471  * Get Shared RSS action from the action list.
3472  *
3473  * @param[in] dev
3474  *   Pointer to Ethernet device.
3475  * @param[in] shared
3476  *   Pointer to the list of actions.
3477  * @param[in] shared_n
3478  *   Actions list length.
3479  *
3480  * @return
3481  *   The MLX5 shared RSS action ID if it exists, otherwise 0.
3482  */
3483 static uint32_t
3484 flow_get_shared_rss_action(struct rte_eth_dev *dev,
3485                            struct mlx5_translated_shared_action *shared,
3486                            int shared_n)
3487 {
3488         struct mlx5_translated_shared_action *shared_end;
3489         struct mlx5_priv *priv = dev->data->dev_private;
3490         struct mlx5_shared_action_rss *shared_rss;
3491
3493         for (shared_end = shared + shared_n; shared < shared_end; shared++) {
3494                 uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
3495                 uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
3496                 uint32_t idx = act_idx &
3497                                    ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
3498                 switch (type) {
3499                 case MLX5_SHARED_ACTION_TYPE_RSS:
3500                         shared_rss = mlx5_ipool_get
3501                                 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
3502                                                                            idx);
3503                         __atomic_add_fetch(&shared_rss->refcnt, 1,
3504                                            __ATOMIC_RELAXED);
3505                         return idx;
3506                 default:
3507                         break;
3508                 }
3509         }
3510         return 0;
3511 }
3512
3513 static unsigned int
3514 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3515 {
3516         const struct rte_flow_item *item;
3517         unsigned int has_vlan = 0;
3518
3519         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3520                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3521                         has_vlan = 1;
3522                         break;
3523                 }
3524         }
3525         if (has_vlan)
3526                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3527                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3528         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3529                                MLX5_EXPANSION_ROOT_OUTER;
3530 }
3531
3532 /**
3533  *  Get layer flags from the prefix flow.
3534  *
3535  *  Some flows may be split into several subflows: the prefix subflow gets
3536  *  the match items and the suffix subflow gets the actions.
3537  *  Some actions need the user-defined match item flags to get the details
3538  *  for the action.
3539  *  This function helps the suffix flow to get the item layer flags from the
3540  *  prefix subflow.
3541  *
3542  * @param[in] dev_flow
3543  *   Pointer to the created prefix subflow.
3544  *
3545  * @return
3546  *   The layers obtained from the prefix subflow.
3547  */
3548 static inline uint64_t
3549 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3550 {
3551         uint64_t layers = 0;
3552
3553         /*
3554          * The layers bits could be kept in a local variable, but usually
3555          * the compiler optimizes that on its own.
3556          * If there are no decap actions, use the layers directly.
3557          */
3558         if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3559                 return dev_flow->handle->layers;
3560         /* Convert L3 layers with decap action. */
3561         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3562                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3563         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3564                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3565         /* Convert L4 layers with decap action.  */
3566         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3567                 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3568         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3569                 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3570         return layers;
3571 }
3572
3573 /**
3574  * Get metadata split action information.
3575  *
3576  * @param[in] actions
3577  *   Pointer to the list of actions.
3578  * @param[out] qrss
3579  *   Pointer to the return pointer, set to the QUEUE/RSS action if one
3580  *   is found in the list.
3583  * @param[out] encap_idx
3584  *   Pointer to the index of the encap action if exists, otherwise the last
3585  *   action index.
3586  *
3587  * @return
3588  *   Total number of actions.
3589  */
3590 static int
3591 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3592                                        const struct rte_flow_action **qrss,
3593                                        int *encap_idx)
3594 {
3595         const struct rte_flow_action_raw_encap *raw_encap;
3596         int actions_n = 0;
3597         int raw_decap_idx = -1;
3598
3599         *encap_idx = -1;
3600         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3601                 switch (actions->type) {
3602                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3603                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3604                         *encap_idx = actions_n;
3605                         break;
3606                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3607                         raw_decap_idx = actions_n;
3608                         break;
3609                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3610                         raw_encap = actions->conf;
3611                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3612                                 *encap_idx = raw_decap_idx != -1 ?
3613                                                       raw_decap_idx : actions_n;
3614                         break;
3615                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3616                 case RTE_FLOW_ACTION_TYPE_RSS:
3617                         *qrss = actions;
3618                         break;
3619                 default:
3620                         break;
3621                 }
3622                 actions_n++;
3623         }
3624         if (*encap_idx == -1)
3625                 *encap_idx = actions_n;
3626         /* Count RTE_FLOW_ACTION_TYPE_END. */
3627         return actions_n + 1;
3628 }
3629
3630 /**
3631  * Check meter action from the action list.
3632  *
3633  * @param[in] actions
3634  *   Pointer to the list of actions.
3635  * @param[out] mtr
3636  *   Pointer to the meter exist flag.
3637  *
3638  * @return
3639  *   Total number of actions.
3640  */
3641 static int
3642 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
3643 {
3644         int actions_n = 0;
3645
3646         MLX5_ASSERT(mtr);
3647         *mtr = 0;
3648         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3649                 switch (actions->type) {
3650                 case RTE_FLOW_ACTION_TYPE_METER:
3651                         *mtr = 1;
3652                         break;
3653                 default:
3654                         break;
3655                 }
3656                 actions_n++;
3657         }
3658         /* Count RTE_FLOW_ACTION_TYPE_END. */
3659         return actions_n + 1;
3660 }
3661
3662 /**
3663  * Check if the flow should be split due to hairpin.
3664  * The reason for the split is that in current HW we can't
3665  * support encap and push-vlan on Rx, so if a flow contains
3666  * these actions we move it to Tx.
3667  *
3668  * @param dev
3669  *   Pointer to Ethernet device.
3670  * @param[in] attr
3671  *   Flow rule attributes.
3672  * @param[in] actions
3673  *   Associated actions (list terminated by the END action).
3674  *
3675  * @return
3676  *   > 0 the number of actions and the flow should be split,
3677  *   0 when no split required.
3678  */
3679 static int
3680 flow_check_hairpin_split(struct rte_eth_dev *dev,
3681                          const struct rte_flow_attr *attr,
3682                          const struct rte_flow_action actions[])
3683 {
3684         int queue_action = 0;
3685         int action_n = 0;
3686         int split = 0;
3687         const struct rte_flow_action_queue *queue;
3688         const struct rte_flow_action_rss *rss;
3689         const struct rte_flow_action_raw_encap *raw_encap;
3690         const struct rte_eth_hairpin_conf *conf;
3691
3692         if (!attr->ingress)
3693                 return 0;
3694         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3695                 switch (actions->type) {
3696                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3697                         queue = actions->conf;
3698                         if (queue == NULL)
3699                                 return 0;
3700                         conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3701                         if (conf == NULL || conf->tx_explicit != 0)
3702                                 return 0;
3703                         queue_action = 1;
3704                         action_n++;
3705                         break;
3706                 case RTE_FLOW_ACTION_TYPE_RSS:
3707                         rss = actions->conf;
3708                         if (rss == NULL || rss->queue_num == 0)
3709                                 return 0;
3710                         conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3711                         if (conf == NULL || conf->tx_explicit != 0)
3712                                 return 0;
3713                         queue_action = 1;
3714                         action_n++;
3715                         break;
3716                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3717                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3718                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3719                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3720                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3721                         split++;
3722                         action_n++;
3723                         break;
3724                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3725                         raw_encap = actions->conf;
3726                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3727                                 split++;
3728                         action_n++;
3729                         break;
3730                 default:
3731                         action_n++;
3732                         break;
3733                 }
3734         }
3735         if (split && queue_action)
3736                 return action_n;
3737         return 0;
3738 }
3739
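/*
 * Editor's note: a hedged, application-level sketch of a rule that the
 * checker above reports as needing a hairpin split: an ingress flow
 * combining an encap action with a QUEUE action that targets a hairpin
 * Rx queue (port/queue ids and the pre-filled vxlan conf are
 * hypothetical):
 *
 *	struct rte_flow_action_vxlan_encap vxlan; // filled by the app
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 4 }; // hairpin queue
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, .conf = &vxlan },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 *
 * With tx_explicit == 0 in the hairpin configuration, the encap action
 * is moved to the Tx side by flow_hairpin_split() below.
 */
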
3740 /* Declare flow create/destroy prototype in advance. */
3741 static uint32_t
3742 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3743                  const struct rte_flow_attr *attr,
3744                  const struct rte_flow_item items[],
3745                  const struct rte_flow_action actions[],
3746                  bool external, struct rte_flow_error *error);
3747
3748 static void
3749 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3750                   uint32_t flow_idx);
3751
3752 int
3753 flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
3754                       struct mlx5_hlist_entry *entry,
3755                       uint64_t key, void *cb_ctx __rte_unused)
3756 {
3757         struct mlx5_flow_mreg_copy_resource *mcp_res =
3758                 container_of(entry, typeof(*mcp_res), hlist_ent);
3759
3760         return mcp_res->mark_id != key;
3761 }
3762
3763 struct mlx5_hlist_entry *
3764 flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
3765                        void *cb_ctx)
3766 {
3767         struct rte_eth_dev *dev = list->ctx;
3768         struct mlx5_priv *priv = dev->data->dev_private;
3769         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3770         struct mlx5_flow_mreg_copy_resource *mcp_res;
3771         struct rte_flow_error *error = ctx->error;
3772         uint32_t idx = 0;
3773         int ret;
3774         uint32_t mark_id = key;
3775         struct rte_flow_attr attr = {
3776                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3777                 .ingress = 1,
3778         };
3779         struct mlx5_rte_flow_item_tag tag_spec = {
3780                 .data = mark_id,
3781         };
3782         struct rte_flow_item items[] = {
3783                 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3784         };
3785         struct rte_flow_action_mark ftag = {
3786                 .id = mark_id,
3787         };
3788         struct mlx5_flow_action_copy_mreg cp_mreg = {
3789                 .dst = REG_B,
3790                 .src = REG_NON,
3791         };
3792         struct rte_flow_action_jump jump = {
3793                 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3794         };
3795         struct rte_flow_action actions[] = {
3796                 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3797         };
3798
3799         /* Fill the register fields in the flow. */
3800         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3801         if (ret < 0)
3802                 return NULL;
3803         tag_spec.id = ret;
3804         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3805         if (ret < 0)
3806                 return NULL;
3807         cp_mreg.src = ret;
3808         /* Provide the full width of FLAG specific value. */
3809         if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3810                 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3811         /* Build a new flow. */
3812         if (mark_id != MLX5_DEFAULT_COPY_ID) {
3813                 items[0] = (struct rte_flow_item){
3814                         .type = (enum rte_flow_item_type)
3815                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3816                         .spec = &tag_spec,
3817                 };
3818                 items[1] = (struct rte_flow_item){
3819                         .type = RTE_FLOW_ITEM_TYPE_END,
3820                 };
3821                 actions[0] = (struct rte_flow_action){
3822                         .type = (enum rte_flow_action_type)
3823                                 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3824                         .conf = &ftag,
3825                 };
3826                 actions[1] = (struct rte_flow_action){
3827                         .type = (enum rte_flow_action_type)
3828                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3829                         .conf = &cp_mreg,
3830                 };
3831                 actions[2] = (struct rte_flow_action){
3832                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3833                         .conf = &jump,
3834                 };
3835                 actions[3] = (struct rte_flow_action){
3836                         .type = RTE_FLOW_ACTION_TYPE_END,
3837                 };
3838         } else {
3839                 /* Default rule, wildcard match. */
3840                 attr.priority = MLX5_FLOW_PRIO_RSVD;
3841                 items[0] = (struct rte_flow_item){
3842                         .type = RTE_FLOW_ITEM_TYPE_END,
3843                 };
3844                 actions[0] = (struct rte_flow_action){
3845                         .type = (enum rte_flow_action_type)
3846                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3847                         .conf = &cp_mreg,
3848                 };
3849                 actions[1] = (struct rte_flow_action){
3850                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3851                         .conf = &jump,
3852                 };
3853                 actions[2] = (struct rte_flow_action){
3854                         .type = RTE_FLOW_ACTION_TYPE_END,
3855                 };
3856         }
3857         /* Build a new entry. */
3858         mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3859         if (!mcp_res) {
3860                 rte_errno = ENOMEM;
3861                 return NULL;
3862         }
3863         mcp_res->idx = idx;
3864         mcp_res->mark_id = mark_id;
3865         /*
3866          * The copy Flows are not included in any list. These
3867          * ones are referenced from other Flows and cannot
3868          * be applied, removed or deleted in arbitrary order
3869          * by list traversing.
3870          */
3871         mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3872                                          actions, false, error);
3873         if (!mcp_res->rix_flow) {
3874                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
3875                 return NULL;
3876         }
3877         return &mcp_res->hlist_ent;
3878 }
3879
3880 /**
3881  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3882  *
3883  * As mark_id is unique, if there's already a registered flow for the mark_id,
3884  * return by increasing the reference counter of the resource. Otherwise, create
3885  * the resource (mcp_res) and flow.
3886  *
3887  * Flow looks like,
3888  *   - If ingress port is ANY and reg_c[1] is mark_id,
3889  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3890  *
3891  * For the default flow (zero mark_id), the flow looks like,
3892  *   - If ingress port is ANY,
3893  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
3894  *
3895  * @param dev
3896  *   Pointer to Ethernet device.
3897  * @param mark_id
3898  *   ID of MARK action, zero means default flow for META.
3899  * @param[out] error
3900  *   Perform verbose error reporting if not NULL.
3901  *
3902  * @return
3903  *   Associated resource on success, NULL otherwise and rte_errno is set.
3904  */
3905 static struct mlx5_flow_mreg_copy_resource *
3906 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3907                           struct rte_flow_error *error)
3908 {
3909         struct mlx5_priv *priv = dev->data->dev_private;
3910         struct mlx5_hlist_entry *entry;
3911         struct mlx5_flow_cb_ctx ctx = {
3912                 .dev = dev,
3913                 .error = error,
3914         };
3915
3916         /* Check if already registered. */
3917         MLX5_ASSERT(priv->mreg_cp_tbl);
3918         entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
3919         if (!entry)
3920                 return NULL;
3921         return container_of(entry, struct mlx5_flow_mreg_copy_resource,
3922                             hlist_ent);
3923 }
3924
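/*
 * Editor's note: mlx5_hlist_register() implements a lookup-or-create
 * idiom: if an entry keyed by mark_id already exists, a reference is
 * taken; otherwise flow_dv_mreg_create_cb() above builds the copy flow.
 * A hedged sketch of the caller side:
 *
 *	struct mlx5_flow_cb_ctx ctx = { .dev = dev, .error = error };
 *	struct mlx5_hlist_entry *e;
 *
 *	e = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
 *	if (e == NULL)
 *		return NULL; // creation failed, rte_errno is set
 *	// ... use the resource; later, drop the reference:
 *	mlx5_hlist_unregister(priv->mreg_cp_tbl, e);
 */
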
3925 void
3926 flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
3927 {
3928         struct mlx5_flow_mreg_copy_resource *mcp_res =
3929                 container_of(entry, typeof(*mcp_res), hlist_ent);
3930         struct rte_eth_dev *dev = list->ctx;
3931         struct mlx5_priv *priv = dev->data->dev_private;
3932
3933         MLX5_ASSERT(mcp_res->rix_flow);
3934         flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3935         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3936 }
3937
3938 /**
3939  * Release flow in RX_CP_TBL.
3940  *
3941  * @param dev
3942  *   Pointer to Ethernet device.
3943  * @param flow
3944  *   Parent flow for which copying is provided.
3945  */
3946 static void
3947 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3948                           struct rte_flow *flow)
3949 {
3950         struct mlx5_flow_mreg_copy_resource *mcp_res;
3951         struct mlx5_priv *priv = dev->data->dev_private;
3952
3953         if (!flow->rix_mreg_copy)
3954                 return;
3955         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3956                                  flow->rix_mreg_copy);
3957         if (!mcp_res || !priv->mreg_cp_tbl)
3958                 return;
3959         MLX5_ASSERT(mcp_res->rix_flow);
3960         mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3961         flow->rix_mreg_copy = 0;
3962 }
3963
3964 /**
3965  * Remove the default copy action from RX_CP_TBL.
3966  *
3967  * This function is called in mlx5_dev_start(). Thread safety
3968  * is not guaranteed.
3969  *
3970  * @param dev
3971  *   Pointer to Ethernet device.
3972  */
3973 static void
3974 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3975 {
3976         struct mlx5_hlist_entry *entry;
3977         struct mlx5_priv *priv = dev->data->dev_private;
3978
3979         /* Check if default flow is registered. */
3980         if (!priv->mreg_cp_tbl)
3981                 return;
3982         entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
3983                                   MLX5_DEFAULT_COPY_ID, NULL);
3984         if (!entry)
3985                 return;
3986         mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
3987 }
3988
3989 /**
3990  * Add the default copy action in RX_CP_TBL.
3991  *
3992  * This function is called in mlx5_dev_start(). Thread safety
3993  * is not guaranteed.
3994  *
3995  * @param dev
3996  *   Pointer to Ethernet device.
3997  * @param[out] error
3998  *   Perform verbose error reporting if not NULL.
3999  *
4000  * @return
4001  *   0 for success, negative value otherwise and rte_errno is set.
4002  */
4003 static int
4004 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
4005                                   struct rte_flow_error *error)
4006 {
4007         struct mlx5_priv *priv = dev->data->dev_private;
4008         struct mlx5_flow_mreg_copy_resource *mcp_res;
4009
4010         /* Check whether extensive metadata feature is engaged. */
4011         if (!priv->config.dv_flow_en ||
4012             priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4013             !mlx5_flow_ext_mreg_supported(dev) ||
4014             !priv->sh->dv_regc0_mask)
4015                 return 0;
4016         /*
4017          * Adding the default mreg copy flow may be called multiple times,
4018          * but it is removed only once in stop. Avoid registering it twice.
4019          */
4020         if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
4021                 return 0;
4022         mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
4023         if (!mcp_res)
4024                 return -rte_errno;
4025         return 0;
4026 }
4027
4028 /**
4029  * Add a flow of copying flow metadata registers in RX_CP_TBL.
4030  *
4031  * All the flows having a Q/RSS action should be split by
4032  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
4033  * performs the following,
4034  *   - CQE->flow_tag := reg_c[1] (MARK)
4035  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4036  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
4037  * but there should be a flow for each MARK ID set by the MARK action.
4038  *
4039  * For the aforementioned reason, if there's a MARK action in flow's action
4040  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
4041  * the MARK ID to CQE's flow_tag like,
4042  *   - If reg_c[1] is mark_id,
4043  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4044  *
4045  * For SET_META action which stores value in reg_c[0], as the destination is
4046  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
4047  * MARK ID means the default flow. The default flow looks like,
4048  *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
4049  *
4050  * @param dev
4051  *   Pointer to Ethernet device.
4052  * @param flow
4053  *   Pointer to flow structure.
4054  * @param[in] actions
4055  *   Pointer to the list of actions.
4056  * @param[out] error
4057  *   Perform verbose error reporting if not NULL.
4058  *
4059  * @return
4060  *   0 on success, negative value otherwise and rte_errno is set.
4061  */
4062 static int
4063 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
4064                             struct rte_flow *flow,
4065                             const struct rte_flow_action *actions,
4066                             struct rte_flow_error *error)
4067 {
4068         struct mlx5_priv *priv = dev->data->dev_private;
4069         struct mlx5_dev_config *config = &priv->config;
4070         struct mlx5_flow_mreg_copy_resource *mcp_res;
4071         const struct rte_flow_action_mark *mark;
4072
4073         /* Check whether extensive metadata feature is engaged. */
4074         if (!config->dv_flow_en ||
4075             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4076             !mlx5_flow_ext_mreg_supported(dev) ||
4077             !priv->sh->dv_regc0_mask)
4078                 return 0;
4079         /* Find MARK action. */
4080         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4081                 switch (actions->type) {
4082                 case RTE_FLOW_ACTION_TYPE_FLAG:
4083                         mcp_res = flow_mreg_add_copy_action
4084                                 (dev, MLX5_FLOW_MARK_DEFAULT, error);
4085                         if (!mcp_res)
4086                                 return -rte_errno;
4087                         flow->rix_mreg_copy = mcp_res->idx;
4088                         return 0;
4089                 case RTE_FLOW_ACTION_TYPE_MARK:
4090                         mark = (const struct rte_flow_action_mark *)
4091                                 actions->conf;
4092                         mcp_res =
4093                                 flow_mreg_add_copy_action(dev, mark->id, error);
4094                         if (!mcp_res)
4095                                 return -rte_errno;
4096                         flow->rix_mreg_copy = mcp_res->idx;
4097                         return 0;
4098                 default:
4099                         break;
4100                 }
4101         }
4102         return 0;
4103 }
4104
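/*
 * Editor's note: an illustrative, hedged application rule that makes
 * the routine above install a copy flow in RX_CP_TBL: any ingress rule
 * carrying a MARK action (mark id and queue index are arbitrary):
 *
 *	struct rte_flow_action_mark mark = { .id = 0xbeef };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * With extended metadata enabled, a copy flow keyed by 0xbeef is added
 * so that CQE->flow_tag can be restored from reg_c[1].
 */
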
4105 #define MLX5_MAX_SPLIT_ACTIONS 24
4106 #define MLX5_MAX_SPLIT_ITEMS 24
4107
4108 /**
4109  * Split the hairpin flow.
4110  * Since HW can't support encap and push-vlan on Rx, we move these
4111  * actions to Tx.
4112  * If the count action comes after the encap then we also
4113  * move the count action. In this case the count will also measure
4114  * the outer bytes.
4115  *
4116  * @param dev
4117  *   Pointer to Ethernet device.
4118  * @param[in] actions
4119  *   Associated actions (list terminated by the END action).
4120  * @param[out] actions_rx
4121  *   Rx flow actions.
4122  * @param[out] actions_tx
4123  *   Tx flow actions.
4124  * @param[out] pattern_tx
4125  *   The pattern items for the Tx flow.
4126  * @param[out] flow_id
4127  *   The flow ID connected to this flow.
4128  *
4129  * @return
4130  *   0 on success.
4131  */
4132 static int
4133 flow_hairpin_split(struct rte_eth_dev *dev,
4134                    const struct rte_flow_action actions[],
4135                    struct rte_flow_action actions_rx[],
4136                    struct rte_flow_action actions_tx[],
4137                    struct rte_flow_item pattern_tx[],
4138                    uint32_t flow_id)
4139 {
4140         const struct rte_flow_action_raw_encap *raw_encap;
4141         const struct rte_flow_action_raw_decap *raw_decap;
4142         struct mlx5_rte_flow_action_set_tag *set_tag;
4143         struct rte_flow_action *tag_action;
4144         struct mlx5_rte_flow_item_tag *tag_item;
4145         struct rte_flow_item *item;
4146         char *addr;
4147         int encap = 0;
4148
4149         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4150                 switch (actions->type) {
4151                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4152                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4153                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4154                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4155                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4156                         rte_memcpy(actions_tx, actions,
4157                                sizeof(struct rte_flow_action));
4158                         actions_tx++;
4159                         break;
4160                 case RTE_FLOW_ACTION_TYPE_COUNT:
4161                         if (encap) {
4162                                 rte_memcpy(actions_tx, actions,
4163                                            sizeof(struct rte_flow_action));
4164                                 actions_tx++;
4165                         } else {
4166                                 rte_memcpy(actions_rx, actions,
4167                                            sizeof(struct rte_flow_action));
4168                                 actions_rx++;
4169                         }
4170                         break;
4171                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4172                         raw_encap = actions->conf;
4173                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
4174                                 memcpy(actions_tx, actions,
4175                                        sizeof(struct rte_flow_action));
4176                                 actions_tx++;
4177                                 encap = 1;
4178                         } else {
4179                                 rte_memcpy(actions_rx, actions,
4180                                            sizeof(struct rte_flow_action));
4181                                 actions_rx++;
4182                         }
4183                         break;
4184                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4185                         raw_decap = actions->conf;
4186                         if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
4187                                 memcpy(actions_tx, actions,
4188                                        sizeof(struct rte_flow_action));
4189                                 actions_tx++;
4190                         } else {
4191                                 rte_memcpy(actions_rx, actions,
4192                                            sizeof(struct rte_flow_action));
4193                                 actions_rx++;
4194                         }
4195                         break;
4196                 default:
4197                         rte_memcpy(actions_rx, actions,
4198                                    sizeof(struct rte_flow_action));
4199                         actions_rx++;
4200                         break;
4201                 }
4202         }
4203         /* Add set meta action and end action for the Rx flow. */
4204         tag_action = actions_rx;
4205         tag_action->type = (enum rte_flow_action_type)
4206                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4207         actions_rx++;
4208         rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4209         actions_rx++;
4210         set_tag = (void *)actions_rx;
4211         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
4212         MLX5_ASSERT(set_tag->id > REG_NON);
4213         set_tag->data = flow_id;
4214         tag_action->conf = set_tag;
4215         /* Create Tx item list. */
4216         rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4217         addr = (void *)&pattern_tx[2];
4218         item = pattern_tx;
4219         item->type = (enum rte_flow_item_type)
4220                      MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4221         tag_item = (void *)addr;
4222         tag_item->data = flow_id;
4223         tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4224         MLX5_ASSERT(tag_item->id > REG_NON);
4225         item->spec = tag_item;
4226         addr += sizeof(struct mlx5_rte_flow_item_tag);
4227         tag_item = (void *)addr;
4228         tag_item->data = UINT32_MAX;
4229         tag_item->id = UINT16_MAX;
4230         item->mask = tag_item;
4231         item->last = NULL;
4232         item++;
4233         item->type = RTE_FLOW_ITEM_TYPE_END;
4234         return 0;
4235 }
4236
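/*
 * Editor's note on the buffer layout produced above: the caller is
 * expected to size actions_rx[] so that the set_tag payload can live
 * right behind the END action. Schematically:
 *
 *	actions_rx: [ copied actions ][ TAG ][ END ][ set_tag payload ]
 *
 * where the TAG action's conf pointer refers to the trailing set_tag
 * payload. The Tx pattern uses the same trick: pattern_tx[0..1] hold
 * the TAG and END items while the tag spec and mask payloads are
 * stored from pattern_tx[2] onwards.
 */
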
4237 /**
4238  * The last stage of splitting chain, just creates the subflow
4239  * without any modification.
4240  *
4241  * @param[in] dev
4242  *   Pointer to Ethernet device.
4243  * @param[in] flow
4244  *   Parent flow structure pointer.
4245  * @param[in, out] sub_flow
4246  *   Pointer to return the created subflow, may be NULL.
4247  * @param[in] attr
4248  *   Flow rule attributes.
4249  * @param[in] items
4250  *   Pattern specification (list terminated by the END pattern item).
4251  * @param[in] actions
4252  *   Associated actions (list terminated by the END action).
4253  * @param[in] flow_split_info
4254  *   Pointer to flow split info structure.
4255  * @param[out] error
4256  *   Perform verbose error reporting if not NULL.
4257  * @return
4258  *   0 on success, negative value otherwise
4259  */
4260 static int
4261 flow_create_split_inner(struct rte_eth_dev *dev,
4262                         struct rte_flow *flow,
4263                         struct mlx5_flow **sub_flow,
4264                         const struct rte_flow_attr *attr,
4265                         const struct rte_flow_item items[],
4266                         const struct rte_flow_action actions[],
4267                         struct mlx5_flow_split_info *flow_split_info,
4268                         struct rte_flow_error *error)
4269 {
4270         struct mlx5_flow *dev_flow;
4271
4272         dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4273                                     flow_split_info->flow_idx, error);
4274         if (!dev_flow)
4275                 return -rte_errno;
4276         dev_flow->flow = flow;
4277         dev_flow->external = flow_split_info->external;
4278         dev_flow->skip_scale = flow_split_info->skip_scale;
4279         /* Subflow object was created, we must include one in the list. */
4280         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4281                       dev_flow->handle, next);
4282         /*
4283          * If dev_flow is one of the suffix flows, some actions in the
4284          * suffix flow may need the user-defined item layer flags; pass
4285          * the metadata rxq mark flag to the suffix flow as well.
4286          */
4287         if (flow_split_info->prefix_layers)
4288                 dev_flow->handle->layers = flow_split_info->prefix_layers;
4289         if (flow_split_info->prefix_mark)
4290                 dev_flow->handle->mark = 1;
4291         if (sub_flow)
4292                 *sub_flow = dev_flow;
4293         return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4294 }
4295
4296 /**
4297  * Split the meter flow.
4298  *
4299  * As the meter flow will be split into three subflows, the
4300  * actions other than the meter action only make sense when the
4301  * meter accepts the packet. If the packet is to be dropped, no
4302  * additional actions should be taken.
4303  *
4304  * One kind of special action which decapsulates the L3 tunnel
4305  * header will be in the prefix subflow, so as not to take the
4306  * L3 tunnel header into account.
4307  *
4308  * @param dev
4309  *   Pointer to Ethernet device.
4310  * @param[in] items
4311  *   Pattern specification (list terminated by the END pattern item).
4312  * @param[out] sfx_items
4313  *   Suffix flow match items (list terminated by the END pattern item).
4314  * @param[in] actions
4315  *   Associated actions (list terminated by the END action).
4316  * @param[out] actions_sfx
4317  *   Suffix flow actions.
4318  * @param[out] actions_pre
4319  *   Prefix flow actions.
4320  * @param[out] pattern_sfx
4321  *   The pattern items for the suffix flow.
4322  * @param[out] tag_sfx
4323  *   Pointer to suffix flow tag.
4324  *
4325  * @return
4326  *   0 on success.
4327  */
4328 static int
4329 flow_meter_split_prep(struct rte_eth_dev *dev,
4330                  const struct rte_flow_item items[],
4331                  struct rte_flow_item sfx_items[],
4332                  const struct rte_flow_action actions[],
4333                  struct rte_flow_action actions_sfx[],
4334                  struct rte_flow_action actions_pre[])
4335 {
4336         struct mlx5_priv *priv = dev->data->dev_private;
4337         struct rte_flow_action *tag_action = NULL;
4338         struct rte_flow_item *tag_item;
4339         struct mlx5_rte_flow_action_set_tag *set_tag;
4340         struct rte_flow_error error;
4341         const struct rte_flow_action_raw_encap *raw_encap;
4342         const struct rte_flow_action_raw_decap *raw_decap;
4343         struct mlx5_rte_flow_item_tag *tag_spec;
4344         struct mlx5_rte_flow_item_tag *tag_mask;
4345         uint32_t tag_id = 0;
4346         bool copy_vlan = false;
4347
4348         /* Prepare the actions for prefix and suffix flow. */
4349         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4350                 struct rte_flow_action **action_cur = NULL;
4351
4352                 switch (actions->type) {
4353                 case RTE_FLOW_ACTION_TYPE_METER:
4354                         /* Add the extra tag action first. */
4355                         tag_action = actions_pre;
4356                         tag_action->type = (enum rte_flow_action_type)
4357                                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4358                         actions_pre++;
4359                         action_cur = &actions_pre;
4360                         break;
4361                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4362                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4363                         action_cur = &actions_pre;
4364                         break;
4365                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4366                         raw_encap = actions->conf;
4367                         if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4368                                 action_cur = &actions_pre;
4369                         break;
4370                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4371                         raw_decap = actions->conf;
4372                         if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4373                                 action_cur = &actions_pre;
4374                         break;
4375                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4376                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4377                         copy_vlan = true;
4378                         break;
4379                 default:
4380                         break;
4381                 }
4382                 if (!action_cur)
4383                         action_cur = &actions_sfx;
4384                 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
4385                 (*action_cur)++;
4386         }
4387         /* Add end action to the actions. */
4388         actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4389         actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4390         actions_pre++;
4391         /* Set the tag. */
4392         set_tag = (void *)actions_pre;
4393         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4394         mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4395                           &tag_id);
4396         if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
4397                 DRV_LOG(ERR, "Port %u meter flow id exceeds max limit.",
4398                         dev->data->port_id);
4399                 mlx5_ipool_free(priv->sh->ipool
4400                                 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
4401                 return 0;
4402         } else if (!tag_id) {
4403                 return 0;
4404         }
4405         set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
4406         MLX5_ASSERT(tag_action);
4407         tag_action->conf = set_tag;
4408         /* Prepare the suffix subflow items. */
4409         tag_item = sfx_items++;
4410         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4411                 int item_type = items->type;
4412
4413                 switch (item_type) {
4414                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4415                         memcpy(sfx_items, items, sizeof(*sfx_items));
4416                         sfx_items++;
4417                         break;
4418                 case RTE_FLOW_ITEM_TYPE_VLAN:
4419                         if (copy_vlan) {
4420                                 memcpy(sfx_items, items, sizeof(*sfx_items));
4421                                 /*
4422                                  * Convert to internal match item, it is used
4423                                  * for vlan push and set vid.
4424                                  */
4425                                 sfx_items->type = (enum rte_flow_item_type)
4426                                                   MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4427                                 sfx_items++;
4428                         }
4429                         break;
4430                 default:
4431                         break;
4432                 }
4433         }
4434         sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4435         sfx_items++;
4436         tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4437         tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
4438         tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4439         tag_mask = tag_spec + 1;
4440         tag_mask->data = 0xffffff00;
4441         tag_item->type = (enum rte_flow_item_type)
4442                          MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4443         tag_item->spec = tag_spec;
4444         tag_item->last = NULL;
4445         tag_item->mask = tag_mask;
4446         return tag_id;
4447 }
4448
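/*
 * Editor's note: a hedged before/after sketch of the meter split done
 * above (action names abbreviated; the tag register and id are
 * internal):
 *
 *	original: METER / DECAP / QUEUE / END
 *	prefix:   TAG(mtr_reg := tag_id << color_bits) / METER / DECAP / END
 *	suffix:   match TAG(mtr_reg) -> QUEUE / END
 *
 * The set_tag payload is stored behind the prefix END action and the
 * tag spec/mask payloads behind the suffix END item, mirroring the
 * hairpin split buffer layout.
 */
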
4449 /**
4450  * Split action list having QUEUE/RSS for metadata register copy.
4451  *
4452  * Once Q/RSS action is detected in user's action list, the flow action
4453  * should be split in order to copy metadata registers, which will happen in
4454  * RX_CP_TBL like,
4455  *   - CQE->flow_tag := reg_c[1] (MARK)
4456  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4457  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4458  * This is because the last action of each flow must be a terminal action
4459  * (QUEUE, RSS or DROP).
4460  *
4461  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4462  * stored and kept in the mlx5_flow structure per each sub_flow.
4463  *
4464  * The Q/RSS action is replaced with,
4465  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4466  * And the following JUMP action is added at the end,
4467  *   - JUMP, to RX_CP_TBL.
4468  *
4469  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
4470  * flow_create_split_metadata() routine. The flow will look like,
4471  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4472  *
4473  * @param dev
4474  *   Pointer to Ethernet device.
4475  * @param[out] split_actions
4476  *   Pointer to store split actions to jump to CP_TBL.
4477  * @param[in] actions
4478  *   Pointer to the list of original flow actions.
4479  * @param[in] qrss
4480  *   Pointer to the Q/RSS action.
4481  * @param[in] actions_n
4482  *   Number of original actions.
4483  * @param[out] error
4484  *   Perform verbose error reporting if not NULL.
4485  *
4486  * @return
4487  *   non-zero unique flow_id on success, otherwise 0 and
4488  *   error/rte_error are set.
4489  */
4490 static uint32_t
4491 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4492                           struct rte_flow_action *split_actions,
4493                           const struct rte_flow_action *actions,
4494                           const struct rte_flow_action *qrss,
4495                           int actions_n, struct rte_flow_error *error)
4496 {
4497         struct mlx5_priv *priv = dev->data->dev_private;
4498         struct mlx5_rte_flow_action_set_tag *set_tag;
4499         struct rte_flow_action_jump *jump;
4500         const int qrss_idx = qrss - actions;
4501         uint32_t flow_id = 0;
4502         int ret = 0;
4503
4504         /*
4505          * Given actions will be split
4506          * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4507          * - Add jump to mreg CP_TBL.
4508          * As a result, there will be one more action.
4509          */
4510         ++actions_n;
4511         memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4512         set_tag = (void *)(split_actions + actions_n);
4513         /*
4514          * If the tag action is not set to void (it means we are not the
4515          * meter suffix flow), add the tag action, since the meter suffix
4516          * flow already has the tag added.
4517          */
4518         if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4519                 /*
4520                  * Allocate the new subflow ID. This one is unique within
4521                  * device and not shared with representors. Otherwise,
4522                  * we would have to resolve multi-thread access synch
4523                  * issue. Each flow on the shared device is appended
4524                  * with source vport identifier, so the resulting
4525                  * flows will be unique in the shared (by master and
4526                  * representors) domain even if they have coinciding
4527                  * IDs.
4528                  */
4529                 mlx5_ipool_malloc(priv->sh->ipool
4530                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4531                 if (!flow_id)
4532                         return rte_flow_error_set(error, ENOMEM,
4533                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4534                                                   NULL, "can't allocate id "
4535                                                   "for split Q/RSS subflow");
4536                 /* Internal SET_TAG action to set flow ID. */
4537                 *set_tag = (struct mlx5_rte_flow_action_set_tag){
4538                         .data = flow_id,
4539                 };
4540                 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4541                 if (ret < 0)
4542                         return ret;
4543                 set_tag->id = ret;
4544                 /* Construct new actions array. */
4545                 /* Replace QUEUE/RSS action. */
4546                 split_actions[qrss_idx] = (struct rte_flow_action){
4547                         .type = (enum rte_flow_action_type)
4548                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4549                         .conf = set_tag,
4550                 };
4551         }
4552         /* JUMP action to jump to mreg copy table (CP_TBL). */
4553         jump = (void *)(set_tag + 1);
4554         *jump = (struct rte_flow_action_jump){
4555                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4556         };
4557         split_actions[actions_n - 2] = (struct rte_flow_action){
4558                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
4559                 .conf = jump,
4560         };
4561         split_actions[actions_n - 1] = (struct rte_flow_action){
4562                 .type = RTE_FLOW_ACTION_TYPE_END,
4563         };
4564         return flow_id;
4565 }
4566
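/*
 * Editor's note: a hedged sketch of the rewrite performed above for a
 * simple MARK + QUEUE rule (register and flow_id are internal):
 *
 *	original: MARK / QUEUE / END
 *	split:    MARK / TAG(reg_c[2] := flow_id) / JUMP(CP_TBL) / END
 *
 * The SET_TAG and JUMP payloads are carved out of the same allocation,
 * right behind the END action, which is why the caller reserves
 * actions_n + 1 action slots plus room for both payloads.
 */
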
4567 /**
4568  * Extend the given action list for Tx metadata copy.
4569  *
4570  * Copy the given action list to the ext_actions and add flow metadata register
4571  * copy action in order to copy reg_a set by WQE to reg_c[0].
4572  *
4573  * @param[out] ext_actions
4574  *   Pointer to the extended action list.
4575  * @param[in] actions
4576  *   Pointer to the list of actions.
4577  * @param[in] actions_n
4578  *   Number of actions in the list.
4579  * @param[out] error
4580  *   Perform verbose error reporting if not NULL.
4581  * @param[in] encap_idx
4582  *   The encap action index.
4583  *
4584  * @return
4585  *   0 on success, negative value otherwise
4586  */
4587 static int
4588 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4589                        struct rte_flow_action *ext_actions,
4590                        const struct rte_flow_action *actions,
4591                        int actions_n, struct rte_flow_error *error,
4592                        int encap_idx)
4593 {
4594         struct mlx5_flow_action_copy_mreg *cp_mreg =
4595                 (struct mlx5_flow_action_copy_mreg *)
4596                         (ext_actions + actions_n + 1);
4597         int ret;
4598
4599         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4600         if (ret < 0)
4601                 return ret;
4602         cp_mreg->dst = ret;
4603         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4604         if (ret < 0)
4605                 return ret;
4606         cp_mreg->src = ret;
4607         if (encap_idx != 0)
4608                 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4609         if (encap_idx == actions_n - 1) {
4610                 ext_actions[actions_n - 1] = (struct rte_flow_action){
4611                         .type = (enum rte_flow_action_type)
4612                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4613                         .conf = cp_mreg,
4614                 };
4615                 ext_actions[actions_n] = (struct rte_flow_action){
4616                         .type = RTE_FLOW_ACTION_TYPE_END,
4617                 };
4618         } else {
4619                 ext_actions[encap_idx] = (struct rte_flow_action){
4620                         .type = (enum rte_flow_action_type)
4621                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4622                         .conf = cp_mreg,
4623                 };
4624                 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4625                                 sizeof(*ext_actions) * (actions_n - encap_idx));
4626         }
4627         return 0;
4628 }
4629
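/*
 * Editor's note: the copy-mreg payload above also trails the action
 * array, at ext_actions[actions_n + 1]. For an encap_idx inside the
 * list the result is, schematically:
 *
 *	ext_actions: [ actions[0..encap_idx-1] ][ COPY_MREG ]
 *	             [ actions[encap_idx..actions_n-1] ][ cp_mreg payload ]
 *
 * so the metadata copy (reg_a to reg_c[0], see the function comment)
 * is placed before the encapsulation action.
 */
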
4630 /**
4631  * Check the match action from the action list.
4632  *
4633  * @param[in] actions
4634  *   Pointer to the list of actions.
4635  * @param[in] attr
4636  *   Flow rule attributes.
4637  * @param[in] action
4638  *   The action to be checked for existence.
4639  * @param[out] match_action_pos
4640  *   Pointer to the position of the matched action if it exists, otherwise -1.
4641  * @param[out] qrss_action_pos
4642  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4643  *
4644  * @return
4645  *   > 0 the total number of actions.
4646  *   0 if the match action is not found in the action list.
4647  */
4648 static int
4649 flow_check_match_action(const struct rte_flow_action actions[],
4650                         const struct rte_flow_attr *attr,
4651                         enum rte_flow_action_type action,
4652                         int *match_action_pos, int *qrss_action_pos)
4653 {
4654         const struct rte_flow_action_sample *sample;
4655         int actions_n = 0;
4656         uint32_t ratio = 0;
4657         int sub_type = 0;
4658         int flag = 0;
4659
4660         *match_action_pos = -1;
4661         *qrss_action_pos = -1;
4662         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4663                 if (actions->type == action) {
4664                         flag = 1;
4665                         *match_action_pos = actions_n;
4666                 }
4667                 if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
4668                     actions->type == RTE_FLOW_ACTION_TYPE_RSS)
4669                         *qrss_action_pos = actions_n;
4670                 if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
4671                         sample = actions->conf;
4672                         ratio = sample->ratio;
4673                         sub_type = ((const struct rte_flow_action *)
4674                                         (sample->actions))->type;
4675                 }
4676                 actions_n++;
4677         }
4678         if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
4679                 if (ratio == 1) {
4680                         /* FDB mirroring is implemented with the destination
4681                          * array instead of the FLOW_SAMPLER object.
4682                          */
4683                         if (sub_type != RTE_FLOW_ACTION_TYPE_END)
4684                                 flag = 0;
4685                 }
4686         }
4687         /* Count RTE_FLOW_ACTION_TYPE_END. */
4688         return flag ? actions_n + 1 : 0;
4689 }
4690
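/*
 * Editor's note: a hedged, application-level sketch of the FDB
 * mirroring case detected above: a transfer rule whose SAMPLE action
 * has ratio == 1 and a non-empty sub-action list (port id is
 * hypothetical):
 *
 *	struct rte_flow_action_port_id mirror_port = { .id = 1 };
 *	struct rte_flow_action sample_acts[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &mirror_port },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_sample sample = {
 *		.ratio = 1,             // mirror every packet
 *		.actions = sample_acts, // destination of the mirrored copy
 *	};
 *
 * Since the sub-action list is not END-only, flag is cleared and no
 * sample split is attempted; the rule is handled as mirroring via the
 * destination array.
 */
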
4691 #define SAMPLE_SUFFIX_ITEM 2
4692
4693 /**
4694  * Split the sample flow.
4695  *
4696  * As the sample flow will be split into two subflows, the prefix flow
4697  * keeps the sample action while the other actions move to a new suffix flow.
4698  *
4699  * Also add a unique tag id with a tag action in the sample flow;
4700  * the same tag id will be matched in the suffix flow.
4701  *
4702  * @param dev
4703  *   Pointer to Ethernet device.
4704  * @param[in] add_tag
4705  *   Add extra tag action flag.
4706  * @param[out] sfx_items
4707  *   Suffix flow match items (list terminated by the END pattern item).
4708  * @param[in] actions
4709  *   Associated actions (list terminated by the END action).
4710  * @param[out] actions_sfx
4711  *   Suffix flow actions.
4712  * @param[out] actions_pre
4713  *   Prefix flow actions.
4714  * @param[in] actions_n
4715  *   The total number of actions.
4716  * @param[in] sample_action_pos
4717  *   The sample action position.
4718  * @param[in] qrss_action_pos
4719  *   The Queue/RSS action position.
4720  * @param[out] error
4721  *   Perform verbose error reporting if not NULL.
4722  *
4723  * @return
4724  *   0 or a unique flow_id on success, a negative errno value
4725  *   otherwise and rte_errno is set.
4726  */
4727 static int
4728 flow_sample_split_prep(struct rte_eth_dev *dev,
4729                        int add_tag,
4730                        struct rte_flow_item sfx_items[],
4731                        const struct rte_flow_action actions[],
4732                        struct rte_flow_action actions_sfx[],
4733                        struct rte_flow_action actions_pre[],
4734                        int actions_n,
4735                        int sample_action_pos,
4736                        int qrss_action_pos,
4737                        struct rte_flow_error *error)
4738 {
4739         struct mlx5_priv *priv = dev->data->dev_private;
4740         struct mlx5_rte_flow_action_set_tag *set_tag;
4741         struct mlx5_rte_flow_item_tag *tag_spec;
4742         struct mlx5_rte_flow_item_tag *tag_mask;
4743         uint32_t tag_id = 0;
4744         int index;
4745         int ret;
4746
4747         if (sample_action_pos < 0)
4748                 return rte_flow_error_set(error, EINVAL,
4749                                           RTE_FLOW_ERROR_TYPE_ACTION,
4750                                           NULL, "invalid position of sample "
4751                                           "action in list");
4752         /* For CX5, add an extra tag action for NIC-RX and E-Switch ingress.
4753          * For CX6DX and above, as metadata registers Cx preserve their value,
4754          * add an extra tag action for NIC-RX and E-Switch ingress and egress.
4755          */
4756         if (add_tag) {
4757                 /* Prepare the prefix tag action. */
4758                 set_tag = (void *)(actions_pre + actions_n + 1);
4759                 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4760                 if (ret < 0)
4761                         return ret;
4762                 set_tag->id = ret;
4763                 mlx5_ipool_malloc(priv->sh->ipool
4764                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4765                 set_tag->data = tag_id;
4766                 /* Prepare the suffix subflow items. */
4767                 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4768                 tag_spec->data = tag_id;
4769                 tag_spec->id = set_tag->id;
4770                 tag_mask = tag_spec + 1;
4771                 tag_mask->data = UINT32_MAX;
4772                 sfx_items[0] = (struct rte_flow_item){
4773                         .type = (enum rte_flow_item_type)
4774                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4775                         .spec = tag_spec,
4776                         .last = NULL,
4777                         .mask = tag_mask,
4778                 };
4779                 sfx_items[1] = (struct rte_flow_item){
4780                         .type = (enum rte_flow_item_type)
4781                                 RTE_FLOW_ITEM_TYPE_END,
4782                 };
4783         }
4784         /* Prepare the actions for prefix and suffix flow. */
4785         if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4786                 index = qrss_action_pos;
4787                 /* Put actions preceding the Queue/RSS action into prefix flow. */
4788                 if (index != 0)
4789                         memcpy(actions_pre, actions,
4790                                sizeof(struct rte_flow_action) * index);
4791                 /* Put others preceding the sample action into prefix flow. */
4792                 if (sample_action_pos > index + 1)
4793                         memcpy(actions_pre + index, actions + index + 1,
4794                                sizeof(struct rte_flow_action) *
4795                                (sample_action_pos - index - 1));
4796                 index = sample_action_pos - 1;
4797                 /* Put Queue/RSS action into Suffix flow. */
4798                 memcpy(actions_sfx, actions + qrss_action_pos,
4799                        sizeof(struct rte_flow_action));
4800                 actions_sfx++;
4801         } else {
4802                 index = sample_action_pos;
4803                 if (index != 0)
4804                         memcpy(actions_pre, actions,
4805                                sizeof(struct rte_flow_action) * index);
4806         }
4807         if (add_tag) {
4808                 actions_pre[index++] =
4809                         (struct rte_flow_action){
4810                         .type = (enum rte_flow_action_type)
4811                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4812                         .conf = set_tag,
4813                 };
4814         }
4815         memcpy(actions_pre + index, actions + sample_action_pos,
4816                sizeof(struct rte_flow_action));
4817         index += 1;
4818         actions_pre[index] = (struct rte_flow_action){
4819                 .type = (enum rte_flow_action_type)
4820                         RTE_FLOW_ACTION_TYPE_END,
4821         };
4822         /* Put the actions after sample into Suffix flow. */
4823         memcpy(actions_sfx, actions + sample_action_pos + 1,
4824                sizeof(struct rte_flow_action) *
4825                (actions_n - sample_action_pos - 1));
4826         return tag_id;
4827 }
4828
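/*
 * Editor's note: SAMPLE_SUFFIX_ITEM above reserves two rte_flow_item
 * slots: sfx_items[0] holds the internal TAG match and sfx_items[1]
 * the END item, while the tag spec and mask payloads are stored from
 * sfx_items[SAMPLE_SUFFIX_ITEM] onwards, the same trailing-payload
 * layout used by the other split helpers in this file.
 */
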
4829 /**
4830  * The splitting for metadata feature.
4831  *
4832  * - Q/RSS action on NIC Rx should be split in order to pass by
4833  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
4834  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
4835  *
4836  * - All the actions on NIC Tx should have a mreg copy action to
4837  *   copy reg_a from WQE to reg_c[0].
4838  *
4839  * @param dev
4840  *   Pointer to Ethernet device.
4841  * @param[in] flow
4842  *   Parent flow structure pointer.
4843  * @param[in] attr
4844  *   Flow rule attributes.
4845  * @param[in] items
4846  *   Pattern specification (list terminated by the END pattern item).
4847  * @param[in] actions
4848  *   Associated actions (list terminated by the END action).
4849  * @param[in] flow_split_info
4850  *   Pointer to flow split info structure.
4851  * @param[out] error
4852  *   Perform verbose error reporting if not NULL.
4853  * @return
4854  *   0 on success, negative value otherwise
4855  */
4856 static int
4857 flow_create_split_metadata(struct rte_eth_dev *dev,
4858                            struct rte_flow *flow,
4859                            const struct rte_flow_attr *attr,
4860                            const struct rte_flow_item items[],
4861                            const struct rte_flow_action actions[],
4862                            struct mlx5_flow_split_info *flow_split_info,
4863                            struct rte_flow_error *error)
4864 {
4865         struct mlx5_priv *priv = dev->data->dev_private;
4866         struct mlx5_dev_config *config = &priv->config;
4867         const struct rte_flow_action *qrss = NULL;
4868         struct rte_flow_action *ext_actions = NULL;
4869         struct mlx5_flow *dev_flow = NULL;
4870         uint32_t qrss_id = 0;
4871         int mtr_sfx = 0;
4872         size_t act_size;
4873         int actions_n;
4874         int encap_idx;
4875         int ret;
4876
4877         /* Check whether extensive metadata feature is engaged. */
4878         if (!config->dv_flow_en ||
4879             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4880             !mlx5_flow_ext_mreg_supported(dev))
4881                 return flow_create_split_inner(dev, flow, NULL, attr, items,
4882                                                actions, flow_split_info, error);
4883         actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
4884                                                            &encap_idx);
4885         if (qrss) {
4886                 /* Exclude hairpin flows from splitting. */
4887                 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
4888                         const struct rte_flow_action_queue *queue;
4889
4890                         queue = qrss->conf;
4891                         if (mlx5_rxq_get_type(dev, queue->index) ==
4892                             MLX5_RXQ_TYPE_HAIRPIN)
4893                                 qrss = NULL;
4894                 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
4895                         const struct rte_flow_action_rss *rss;
4896
4897                         rss = qrss->conf;
4898                         if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
4899                             MLX5_RXQ_TYPE_HAIRPIN)
4900                                 qrss = NULL;
4901                 }
4902         }
4903         if (qrss) {
4904                 /* Check if it is in meter suffix table. */
4905                 mtr_sfx = attr->group == (attr->transfer ?
4906                           (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4907                           MLX5_FLOW_TABLE_LEVEL_SUFFIX);
4908                 /*
4909                  * Q/RSS action on NIC Rx should be split in order to pass by
4910                  * the mreg copy table (RX_CP_TBL) and then it jumps to the
4911                  * action table (RX_ACT_TBL) which has the split Q/RSS action.
4912                  */
4913                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4914                            sizeof(struct rte_flow_action_set_tag) +
4915                            sizeof(struct rte_flow_action_jump);
4916                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4917                                           SOCKET_ID_ANY);
4918                 if (!ext_actions)
4919                         return rte_flow_error_set(error, ENOMEM,
4920                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4921                                                   NULL, "no memory to split "
4922                                                   "metadata flow");
4923                 /*
4924                  * If we are the meter suffix flow, the tag already exists.
4925                  * Set the tag action to void.
4926                  */
4927                 if (mtr_sfx)
4928                         ext_actions[qrss - actions].type =
4929                                                 RTE_FLOW_ACTION_TYPE_VOID;
4930                 else
4931                         ext_actions[qrss - actions].type =
4932                                                 (enum rte_flow_action_type)
4933                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4934                 /*
4935                  * Create the new actions list with removed Q/RSS action
4936                  * and appended set tag and jump to register copy table
4937                  * (RX_CP_TBL). We should preallocate unique tag ID here
4938                  * in advance, because it is needed for set tag action.
4939                  */
4940                 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
4941                                                     qrss, actions_n, error);
4942                 if (!mtr_sfx && !qrss_id) {
4943                         ret = -rte_errno;
4944                         goto exit;
4945                 }
4946         } else if (attr->egress && !attr->transfer) {
4947                 /*
4948                  * All the actions on NIC Tx should have a metadata register
4949                  * copy action to copy reg_a from WQE to reg_c[meta]
4950                  */
4951                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4952                            sizeof(struct mlx5_flow_action_copy_mreg);
4953                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4954                                           SOCKET_ID_ANY);
4955                 if (!ext_actions)
4956                         return rte_flow_error_set(error, ENOMEM,
4957                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4958                                                   NULL, "no memory to split "
4959                                                   "metadata flow");
4960                 /* Create the action list appended with copy register. */
4961                 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
4962                                              actions_n, error, encap_idx);
4963                 if (ret < 0)
4964                         goto exit;
4965         }
4966         /* Add the unmodified original or prefix subflow. */
4967         ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
4968                                       items, ext_actions ? ext_actions :
4969                                       actions, flow_split_info, error);
4970         if (ret < 0)
4971                 goto exit;
4972         MLX5_ASSERT(dev_flow);
4973         if (qrss) {
4974                 const struct rte_flow_attr q_attr = {
4975                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
4976                         .ingress = 1,
4977                 };
4978                 /* Internal PMD action to set register. */
4979                 struct mlx5_rte_flow_item_tag q_tag_spec = {
4980                         .data = qrss_id,
4981                         .id = REG_NON,
4982                 };
4983                 struct rte_flow_item q_items[] = {
4984                         {
4985                                 .type = (enum rte_flow_item_type)
4986                                         MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4987                                 .spec = &q_tag_spec,
4988                                 .last = NULL,
4989                                 .mask = NULL,
4990                         },
4991                         {
4992                                 .type = RTE_FLOW_ITEM_TYPE_END,
4993                         },
4994                 };
4995                 struct rte_flow_action q_actions[] = {
4996                         {
4997                                 .type = qrss->type,
4998                                 .conf = qrss->conf,
4999                         },
5000                         {
5001                                 .type = RTE_FLOW_ACTION_TYPE_END,
5002                         },
5003                 };
5004                 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5005
5006                 /*
5007                  * Configure the tag item only if there is no meter subflow.
5008                  * The tag is already set in the meter suffix subflow, so
5009                  * the meter suffix items can be used as is.
5010                  */
5011                 if (qrss_id) {
5012                         /* Not meter subflow. */
5013                         MLX5_ASSERT(!mtr_sfx);
5014                         /*
5015                          * Store the unique ID in the prefix flow because it
5016                          * is destroyed after the suffix flow. The ID is only
5017                          * freed once no actual flows use it, at which point
5018                          * identifier reallocation becomes possible (for
5019                          * example, for other flows in other threads).
5020                          */
5021                         dev_flow->handle->split_flow_id = qrss_id;
5022                         ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5023                                                    error);
5024                         if (ret < 0)
5025                                 goto exit;
5026                         q_tag_spec.id = ret;
5027                 }
5028                 dev_flow = NULL;
5029                 /* Add suffix subflow to execute Q/RSS. */
5030                 flow_split_info->prefix_layers = layers;
5031                 flow_split_info->prefix_mark = 0;
5032                 ret = flow_create_split_inner(dev, flow, &dev_flow,
5033                                               &q_attr, mtr_sfx ? items :
5034                                               q_items, q_actions,
5035                                               flow_split_info, error);
5036                 if (ret < 0)
5037                         goto exit;
5038                 /* Clear qrss_id so it is only freed on failure. */
5039                 qrss_id = 0;
5040                 MLX5_ASSERT(dev_flow);
5041         }
5042
5043 exit:
5044         /*
5045          * Do not destroy the partially created subflows in case of error.
5046          * They are included in the parent flow list and will be destroyed
5047          * by flow_drv_destroy().
5048          */
5049         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5050                         qrss_id);
5051         mlx5_free(ext_actions);
5052         return ret;
5053 }
5054
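/*
 * Illustrative sketch (not part of the driver): the rough shape of the two
 * subflows the metadata Q/RSS split above produces for a plain
 * "actions queue index 3 / end" rule on NIC Rx. The "example_" names are
 * hypothetical and the internal tag actions are shown without their
 * configuration for brevity.
 */
static __rte_unused void
example_metadata_split_shape(void)
{
        static const struct rte_flow_action_queue example_queue = {
                .index = 3,
        };
        static const struct rte_flow_action_jump example_jump = {
                .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
        };
        /* Prefix subflow: original pattern, Q/RSS replaced by tag + jump. */
        const struct rte_flow_action example_prefix[] = {
                { .type = (enum rte_flow_action_type)
                          MLX5_RTE_FLOW_ACTION_TYPE_TAG }, /* unique qrss_id */
                { .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &example_jump },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        /* Suffix subflow: match the tag, run the original Q/RSS action. */
        const struct rte_flow_item example_pattern[] = {
                { .type = (enum rte_flow_item_type)
                          MLX5_RTE_FLOW_ITEM_TYPE_TAG },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action example_suffix[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        (void)example_prefix;
        (void)example_pattern;
        (void)example_suffix;
}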
5055 /**
5056  * The splitting for meter feature.
5057  *
5058  * - The meter flow is split into two flows: a prefix and a
5059  *   suffix flow. Packets are meaningful only if they pass the
5060  *   prefix meter action.
5061  *
5062  * - Reg_C_5 is used to match packets between the prefix and
5063  *   suffix flows.
5064  *
5065  * @param dev
5066  *   Pointer to Ethernet device.
5067  * @param[in] flow
5068  *   Parent flow structure pointer.
5069  * @param[in] attr
5070  *   Flow rule attributes.
5071  * @param[in] items
5072  *   Pattern specification (list terminated by the END pattern item).
5073  * @param[in] actions
5074  *   Associated actions (list terminated by the END action).
5075  * @param[in] flow_split_info
5076  *   Pointer to flow split info structure.
5077  * @param[out] error
5078  *   Perform verbose error reporting if not NULL.
5079  * @return
5080  *   0 on success, negative value otherwise
5081  */
5082 static int
5083 flow_create_split_meter(struct rte_eth_dev *dev,
5084                         struct rte_flow *flow,
5085                         const struct rte_flow_attr *attr,
5086                         const struct rte_flow_item items[],
5087                         const struct rte_flow_action actions[],
5088                         struct mlx5_flow_split_info *flow_split_info,
5089                         struct rte_flow_error *error)
5090 {
5091         struct mlx5_priv *priv = dev->data->dev_private;
5092         struct rte_flow_action *sfx_actions = NULL;
5093         struct rte_flow_action *pre_actions = NULL;
5094         struct rte_flow_item *sfx_items = NULL;
5095         struct mlx5_flow *dev_flow = NULL;
5096         struct rte_flow_attr sfx_attr = *attr;
5097         uint32_t mtr = 0;
5098         uint32_t mtr_tag_id = 0;
5099         size_t act_size;
5100         size_t item_size;
5101         int actions_n = 0;
5102         int ret;
5103
5104         if (priv->mtr_en)
5105                 actions_n = flow_check_meter_action(actions, &mtr);
5106         if (mtr) {
5107                 /* The five prefix actions: meter, decap, encap, tag, end. */
5108                 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
5109                            sizeof(struct mlx5_rte_flow_action_set_tag);
5110                 /* tag, vlan, port id, end. */
5111 #define METER_SUFFIX_ITEM 4
5112                 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5113                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5114                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5115                                           0, SOCKET_ID_ANY);
5116                 if (!sfx_actions)
5117                         return rte_flow_error_set(error, ENOMEM,
5118                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5119                                                   NULL, "no memory to split "
5120                                                   "meter flow");
5121                 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5122                              act_size);
5123                 pre_actions = sfx_actions + actions_n;
5124                 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
5125                                                    actions, sfx_actions,
5126                                                    pre_actions);
5127                 if (!mtr_tag_id) {
5128                         ret = -rte_errno;
5129                         goto exit;
5130                 }
5131                 /* Add the prefix subflow. */
5132                 flow_split_info->prefix_mark = 0;
5133                 ret = flow_create_split_inner(dev, flow, &dev_flow,
5134                                               attr, items, pre_actions,
5135                                               flow_split_info, error);
5136                 if (ret) {
5137                         ret = -rte_errno;
5138                         goto exit;
5139                 }
5140                 dev_flow->handle->split_flow_id = mtr_tag_id;
5141                 /* Set the sfx group attr. */
5142                 sfx_attr.group = sfx_attr.transfer ?
5143                                 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5144                                  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
5145                 flow_split_info->prefix_layers =
5146                                 flow_get_prefix_layer_flags(dev_flow);
5147                 flow_split_info->prefix_mark = dev_flow->handle->mark;
5148         }
5149         /* Add the suffix subflow, or the original flow if no meter. */
5150         ret = flow_create_split_metadata(dev, flow,
5151                                          &sfx_attr, sfx_items ?
5152                                          sfx_items : items,
5153                                          sfx_actions ? sfx_actions : actions,
5154                                          flow_split_info, error);
5155 exit:
5156         if (sfx_actions)
5157                 mlx5_free(sfx_actions);
5158         return ret;
5159 }
5160
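/*
 * Illustrative sketch (not part of the driver): the approximate split for
 * "actions meter mtr_id 1 / queue index 0 / end". The prefix keeps the
 * meter and tags the packet (via Reg_C_5, per the comment above); the
 * suffix matches that tag in the suffix table and runs the remaining
 * actions. The "example_" names are hypothetical.
 */
static __rte_unused void
example_meter_split_shape(void)
{
        static const struct rte_flow_action_meter example_meter = {
                .mtr_id = 1,
        };
        static const struct rte_flow_action_queue example_queue = {
                .index = 0,
        };
        const struct rte_flow_action example_prefix[] = {
                { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &example_meter },
                { .type = (enum rte_flow_action_type)
                          MLX5_RTE_FLOW_ACTION_TYPE_TAG }, /* mtr_tag_id */
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        const struct rte_flow_action example_suffix[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        (void)example_prefix;
        (void)example_suffix;
}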
5161 /**
5162  * The splitting for sample feature.
5163  *
5164  * Once a sample action is detected in the action list, the flow actions are
5165  * split into a prefix sub flow and a suffix sub flow.
5166  *
5167  * The original items remain in the prefix sub flow. All actions preceding the
5168  * sample action, and the sample action itself, are copied to the prefix
5169  * sub flow; the actions following the sample action are copied to the
5170  * suffix sub flow. The queue action is always located in the suffix sub flow.
5171  *
5172  * To make packets from the prefix sub flow match the suffix sub flow, an
5173  * extra tag action is added to the prefix sub flow, and the suffix sub
5174  * flow uses a tag item with the unique flow ID.
5175  *
5176  * @param dev
5177  *   Pointer to Ethernet device.
5178  * @param[in] flow
5179  *   Parent flow structure pointer.
5180  * @param[in] attr
5181  *   Flow rule attributes.
5182  * @param[in] items
5183  *   Pattern specification (list terminated by the END pattern item).
5184  * @param[in] actions
5185  *   Associated actions (list terminated by the END action).
5186  * @param[in] flow_split_info
5187  *   Pointer to flow split info structure.
5188  * @param[out] error
5189  *   Perform verbose error reporting if not NULL.
5190  * @return
5191  *   0 on success, negative value otherwise
5192  */
5193 static int
5194 flow_create_split_sample(struct rte_eth_dev *dev,
5195                          struct rte_flow *flow,
5196                          const struct rte_flow_attr *attr,
5197                          const struct rte_flow_item items[],
5198                          const struct rte_flow_action actions[],
5199                          struct mlx5_flow_split_info *flow_split_info,
5200                          struct rte_flow_error *error)
5201 {
5202         struct mlx5_priv *priv = dev->data->dev_private;
5203         struct rte_flow_action *sfx_actions = NULL;
5204         struct rte_flow_action *pre_actions = NULL;
5205         struct rte_flow_item *sfx_items = NULL;
5206         struct mlx5_flow *dev_flow = NULL;
5207         struct rte_flow_attr sfx_attr = *attr;
5208 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5209         struct mlx5_flow_dv_sample_resource *sample_res;
5210         struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5211         struct mlx5_flow_tbl_resource *sfx_tbl;
5212 #endif
5213         size_t act_size;
5214         size_t item_size;
5215         uint32_t fdb_tx = 0;
5216         int32_t tag_id = 0;
5217         int actions_n = 0;
5218         int sample_action_pos;
5219         int qrss_action_pos;
5220         int add_tag = 0;
5221         int ret = 0;
5222
5223         if (priv->sampler_en)
5224                 actions_n = flow_check_match_action(actions, attr,
5225                                         RTE_FLOW_ACTION_TYPE_SAMPLE,
5226                                         &sample_action_pos, &qrss_action_pos);
5227         if (actions_n) {
5228                 /* The prefix actions must include sample, tag, end. */
5229                 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5230                            + sizeof(struct mlx5_rte_flow_action_set_tag);
5231                 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5232                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5233                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5234                                           item_size), 0, SOCKET_ID_ANY);
5235                 if (!sfx_actions)
5236                         return rte_flow_error_set(error, ENOMEM,
5237                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5238                                                   NULL, "no memory to split "
5239                                                   "sample flow");
5240                 /* The representor_id is -1 for uplink. */
5241                 fdb_tx = (attr->transfer && priv->representor_id != -1);
5242                 /*
5243                  * When reg_c_preserve is set, metadata registers Cx preserve
5244                  * their value even through packet duplication.
5245                  */
5246                 add_tag = (!fdb_tx || priv->config.hca_attr.reg_c_preserve);
5247                 if (add_tag)
5248                         sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5249                                         + act_size);
5250                 pre_actions = sfx_actions + actions_n;
5251                 tag_id = flow_sample_split_prep(dev, add_tag, sfx_items,
5252                                                 actions, sfx_actions,
5253                                                 pre_actions, actions_n,
5254                                                 sample_action_pos,
5255                                                 qrss_action_pos, error);
5256                 if (tag_id < 0 || (add_tag && !tag_id)) {
5257                         ret = -rte_errno;
5258                         goto exit;
5259                 }
5260                 /* Add the prefix subflow. */
5261                 ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5262                                               items, pre_actions,
5263                                               flow_split_info, error);
5264                 if (ret) {
5265                         ret = -rte_errno;
5266                         goto exit;
5267                 }
5268                 dev_flow->handle->split_flow_id = tag_id;
5269 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5270                 /* Set the sfx group attr. */
5271                 sample_res = (struct mlx5_flow_dv_sample_resource *)
5272                                         dev_flow->dv.sample_res;
5273                 sfx_tbl = (struct mlx5_flow_tbl_resource *)
5274                                         sample_res->normal_path_tbl;
5275                 sfx_tbl_data = container_of(sfx_tbl,
5276                                         struct mlx5_flow_tbl_data_entry, tbl);
5277                 sfx_attr.group = sfx_attr.transfer ?
5278                                         (sfx_tbl_data->table_id - 1) :
5279                                          sfx_tbl_data->table_id;
5280                 flow_split_info->prefix_layers =
5281                                 flow_get_prefix_layer_flags(dev_flow);
5282                 flow_split_info->prefix_mark = dev_flow->handle->mark;
5283                 /* The suffix group level is already scaled with the factor;
5284                  * set skip_scale to 1 to avoid scaling again in translation.
5285                  */
5286                 flow_split_info->skip_scale = 1;
5287 #endif
5288         }
5289         /* Add the suffix subflow. */
5290         ret = flow_create_split_meter(dev, flow, &sfx_attr,
5291                                       sfx_items ? sfx_items : items,
5292                                       sfx_actions ? sfx_actions : actions,
5293                                       flow_split_info, error);
5294 exit:
5295         if (sfx_actions)
5296                 mlx5_free(sfx_actions);
5297         return ret;
5298 }
5299
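/*
 * Illustrative sketch (not part of the driver): the single-allocation
 * layout used by the sample split above. One buffer carries the suffix
 * actions, the prefix actions and the suffix items back to back, and the
 * auxiliary pointers are carved out of it exactly as done in
 * flow_create_split_sample().
 */
static __rte_unused void
example_sample_split_buffer_layout(int actions_n)
{
        size_t act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1) +
                          sizeof(struct mlx5_rte_flow_action_set_tag);
        size_t item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
                           sizeof(struct mlx5_rte_flow_item_tag) * 2;
        struct rte_flow_action *sfx_actions;
        struct rte_flow_action *pre_actions;
        struct rte_flow_item *sfx_items;

        sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size + item_size, 0,
                                  SOCKET_ID_ANY);
        if (!sfx_actions)
                return;
        /* The suffix items live right past the whole action area... */
        sfx_items = (struct rte_flow_item *)((char *)sfx_actions + act_size);
        /* ...and the prefix action list starts actions_n entries in. */
        pre_actions = sfx_actions + actions_n;
        (void)sfx_items;
        (void)pre_actions;
        mlx5_free(sfx_actions);
}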
5300 /**
5301  * Split the flow into a subflow set. The splitters may be linked
5302  * in a chain, like this:
5303  * flow_create_split_outer() calls:
5304  *   flow_create_split_meter() calls:
5305  *     flow_create_split_metadata(meter_subflow_0) calls:
5306  *       flow_create_split_inner(metadata_subflow_0)
5307  *       flow_create_split_inner(metadata_subflow_1)
5308  *       flow_create_split_inner(metadata_subflow_2)
5309  *     flow_create_split_metadata(meter_subflow_1) calls:
5310  *       flow_create_split_inner(metadata_subflow_0)
5311  *       flow_create_split_inner(metadata_subflow_1)
5312  *       flow_create_split_inner(metadata_subflow_2)
5313  *
5314  * This provides a flexible way to add new levels of flow splitting.
5315  * All successfully created subflows are included in the parent flow
5316  * dev_flow list.
5317  *
5318  * @param dev
5319  *   Pointer to Ethernet device.
5320  * @param[in] flow
5321  *   Parent flow structure pointer.
5322  * @param[in] attr
5323  *   Flow rule attributes.
5324  * @param[in] items
5325  *   Pattern specification (list terminated by the END pattern item).
5326  * @param[in] actions
5327  *   Associated actions (list terminated by the END action).
5328  * @param[in] flow_split_info
5329  *   Pointer to flow split info structure.
5330  * @param[out] error
5331  *   Perform verbose error reporting if not NULL.
5332  * @return
5333  *   0 on success, negative value otherwise
5334  */
5335 static int
5336 flow_create_split_outer(struct rte_eth_dev *dev,
5337                         struct rte_flow *flow,
5338                         const struct rte_flow_attr *attr,
5339                         const struct rte_flow_item items[],
5340                         const struct rte_flow_action actions[],
5341                         struct mlx5_flow_split_info *flow_split_info,
5342                         struct rte_flow_error *error)
5343 {
5344         int ret;
5345
5346         ret = flow_create_split_sample(dev, flow, attr, items,
5347                                        actions, flow_split_info, error);
5348         MLX5_ASSERT(ret <= 0);
5349         return ret;
5350 }
5351
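/*
 * Illustrative sketch (not part of the driver): how another splitting
 * level would hook into the chain described above -- handle one feature,
 * then delegate the (possibly rewritten) remainder to the next splitter.
 * flow_create_split_example() is hypothetical.
 */
static __rte_unused int
flow_create_split_example(struct rte_eth_dev *dev,
                          struct rte_flow *flow,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item items[],
                          const struct rte_flow_action actions[],
                          struct mlx5_flow_split_info *flow_split_info,
                          struct rte_flow_error *error)
{
        /* 1. Optionally create a prefix subflow for the new feature here. */
        /* 2. Delegate the remainder down the existing chain. */
        return flow_create_split_sample(dev, flow, attr, items, actions,
                                        flow_split_info, error);
}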
5352 static struct mlx5_flow_tunnel *
5353 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5354                       const struct rte_flow_attr *attr,
5355                       const struct rte_flow_item items[],
5356                       const struct rte_flow_action actions[])
5357 {
5358         struct mlx5_flow_tunnel *tunnel;
5359
5360 #pragma GCC diagnostic push
5361 #pragma GCC diagnostic ignored "-Wcast-qual"
5362         if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5363                 tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5364         else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5365                 tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5366         else
5367                 tunnel = NULL;
5368 #pragma GCC diagnostic pop
5369
5370         return tunnel;
5371 }
5372
5373 /**
5374  * Adjust flow RSS workspace if needed.
5375  *
5376  * @param wks
5377  *   Pointer to thread flow work space.
5378  * @param rss_desc
5379  *   Pointer to RSS descriptor.
5380  * @param[in] nrssq_num
5381  *   New RSS queue number.
5382  *
5383  * @return
5384  *   0 on success, -1 otherwise and rte_errno is set.
5385  */
5386 static int
5387 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5388                           struct mlx5_flow_rss_desc *rss_desc,
5389                           uint32_t nrssq_num)
5390 {
5391         if (likely(nrssq_num <= wks->rssq_num))
5392                 return 0;
5393         uint16_t *queue = realloc(rss_desc->queue,
5394                           sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
5395         if (!queue) {
5396                 rte_errno = ENOMEM;
5397                 return -1;
5398         }
             rss_desc->queue = queue;
5399         wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
5400         return 0;
5401 }
5402
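/*
 * Illustrative sketch (not part of the driver): the growth policy above
 * rounds the queue capacity up to an even count, so consecutive small
 * increments do not reallocate every time.
 */
static __rte_unused void
example_rss_workspace_growth(struct mlx5_flow_workspace *wks)
{
        int ret;

        /* Growing to 3 queues rounds the capacity up to 4. */
        ret = flow_rss_workspace_adjust(wks, &wks->rss_desc, 3);
        /* A later request for 4 queues is then served without realloc. */
        if (!ret)
                ret = flow_rss_workspace_adjust(wks, &wks->rss_desc, 4);
        (void)ret;
}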
5403 /**
5404  * Create a flow and add it to @p list.
5405  *
5406  * @param dev
5407  *   Pointer to Ethernet device.
5408  * @param list
5409  *   Pointer to a TAILQ flow list. If this parameter is NULL,
5410  *   no list insertion occurs; the flow is just created and
5411  *   it is the caller's responsibility to track the
5412  *   created flow.
5413  * @param[in] attr
5414  *   Flow rule attributes.
5415  * @param[in] items
5416  *   Pattern specification (list terminated by the END pattern item).
5417  * @param[in] actions
5418  *   Associated actions (list terminated by the END action).
5419  * @param[in] external
5420  *   This flow rule is created by a request external to the PMD.
5421  * @param[out] error
5422  *   Perform verbose error reporting if not NULL.
5423  *
5424  * @return
5425  *   A flow index on success, 0 otherwise and rte_errno is set.
5426  */
5427 static uint32_t
5428 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5429                  const struct rte_flow_attr *attr,
5430                  const struct rte_flow_item items[],
5431                  const struct rte_flow_action original_actions[],
5432                  bool external, struct rte_flow_error *error)
5433 {
5434         struct mlx5_priv *priv = dev->data->dev_private;
5435         struct rte_flow *flow = NULL;
5436         struct mlx5_flow *dev_flow;
5437         const struct rte_flow_action_rss *rss = NULL;
5438         struct mlx5_translated_shared_action
5439                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5440         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5441         union {
5442                 struct mlx5_flow_expand_rss buf;
5443                 uint8_t buffer[2048];
5444         } expand_buffer;
5445         union {
5446                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5447                 uint8_t buffer[2048];
5448         } actions_rx;
5449         union {
5450                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5451                 uint8_t buffer[2048];
5452         } actions_hairpin_tx;
5453         union {
5454                 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5455                 uint8_t buffer[2048];
5456         } items_tx;
5457         struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5458         struct mlx5_flow_rss_desc *rss_desc;
5459         const struct rte_flow_action *p_actions_rx;
5460         uint32_t i;
5461         uint32_t idx = 0;
5462         int hairpin_flow;
5463         struct rte_flow_attr attr_tx = { .priority = 0 };
5464         const struct rte_flow_action *actions;
5465         struct rte_flow_action *translated_actions = NULL;
5466         struct mlx5_flow_tunnel *tunnel;
5467         struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5468         struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
5469         struct mlx5_flow_split_info flow_split_info = {
5470                 .external = !!external,
5471                 .skip_scale = 0,
5472                 .flow_idx = 0,
5473                 .prefix_mark = 0,
5474                 .prefix_layers = 0
5475         };
5476         int ret;
5477
5478         MLX5_ASSERT(wks);
5479         rss_desc = &wks->rss_desc;
5480         ret = flow_shared_actions_translate(dev, original_actions,
5481                                             shared_actions,
5482                                             &shared_actions_n,
5483                                             &translated_actions, error);
5484         if (ret < 0) {
5485                 MLX5_ASSERT(translated_actions == NULL);
5486                 return 0;
5487         }
5488         actions = translated_actions ? translated_actions : original_actions;
5489         p_actions_rx = actions;
5490         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5491         ret = flow_drv_validate(dev, attr, items, p_actions_rx,
5492                                 external, hairpin_flow, error);
5493         if (ret < 0)
5494                 goto error_before_hairpin_split;
5495         flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5496         if (!flow) {
5497                 rte_errno = ENOMEM;
5498                 goto error_before_hairpin_split;
5499         }
5500         if (hairpin_flow > 0) {
5501                 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5502                         rte_errno = EINVAL;
5503                         goto error_before_hairpin_split;
5504                 }
5505                 flow_hairpin_split(dev, actions, actions_rx.actions,
5506                                    actions_hairpin_tx.actions, items_tx.items,
5507                                    idx);
5508                 p_actions_rx = actions_rx.actions;
5509         }
5510         flow_split_info.flow_idx = idx;
5511         flow->drv_type = flow_get_drv_type(dev, attr);
5512         MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5513                     flow->drv_type < MLX5_FLOW_TYPE_MAX);
5514         memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5515         /* The RSS action only works on the NIC Rx domain. */
5516         if (attr->ingress && !attr->transfer)
5517                 rss = flow_get_rss_action(p_actions_rx);
5518         if (rss) {
5519                 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5520                         goto error;
5521                 /*
5522                  * The following information is required by
5523                  * mlx5_flow_hashfields_adjust() in advance.
5524                  */
5525                 rss_desc->level = rss->level;
5526                 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5527                 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5528         }
5529         flow->dev_handles = 0;
5530         if (rss && rss->types) {
5531                 unsigned int graph_root;
5532
5533                 graph_root = find_graph_root(items, rss->level);
5534                 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5535                                            items, rss->types,
5536                                            mlx5_support_expansion, graph_root);
5537                 MLX5_ASSERT(ret > 0 &&
5538                        (unsigned int)ret < sizeof(expand_buffer.buffer));
5539         } else {
5540                 buf->entries = 1;
5541                 buf->entry[0].pattern = (void *)(uintptr_t)items;
5542         }
5543         rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
5544                                                       shared_actions_n);
5545         for (i = 0; i < buf->entries; ++i) {
5546                 /* Initialize flow split data. */
5547                 flow_split_info.prefix_layers = 0;
5548                 flow_split_info.prefix_mark = 0;
5549                 flow_split_info.skip_scale = 0;
5550                 /*
5551                  * The splitter may create multiple dev_flows,
5552                  * depending on configuration. In the simplest
5553                  * case it just creates the unmodified original flow.
5554                  */
5555                 ret = flow_create_split_outer(dev, flow, attr,
5556                                               buf->entry[i].pattern,
5557                                               p_actions_rx, &flow_split_info,
5558                                               error);
5559                 if (ret < 0)
5560                         goto error;
5561                 if (is_flow_tunnel_steer_rule(dev, attr,
5562                                               buf->entry[i].pattern,
5563                                               p_actions_rx)) {
5564                         ret = flow_tunnel_add_default_miss(dev, flow, attr,
5565                                                            p_actions_rx,
5566                                                            idx,
5567                                                            &default_miss_ctx,
5568                                                            error);
5569                         if (ret < 0) {
5570                                 mlx5_free(default_miss_ctx.queue);
5571                                 goto error;
5572                         }
5573                 }
5574         }
5575         /* Create the tx flow. */
5576         if (hairpin_flow) {
5577                 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5578                 attr_tx.ingress = 0;
5579                 attr_tx.egress = 1;
5580                 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5581                                          actions_hairpin_tx.actions,
5582                                          idx, error);
5583                 if (!dev_flow)
5584                         goto error;
5585                 dev_flow->flow = flow;
5586                 dev_flow->external = 0;
5587                 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5588                               dev_flow->handle, next);
5589                 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5590                                          items_tx.items,
5591                                          actions_hairpin_tx.actions, error);
5592                 if (ret < 0)
5593                         goto error;
5594         }
5595         /*
5596          * Update the metadata register copy table. If the extensive
5597          * metadata feature is enabled and registers are supported,
5598          * an extra rte_flow may be created for each unique
5599          * MARK/FLAG action ID.
5600          *
5601          * The table is updated for ingress flows only, because egress
5602          * flows belong to a different device and the copy table should
5603          * be updated in the peer NIC Rx domain.
5604          */
5605         if (attr->ingress &&
5606             (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5607                 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5608                 if (ret)
5609                         goto error;
5610         }
5611         /*
5612          * If the flow is external (from the application), the device is
5613          * started, or this is an mreg discover flow, apply it immediately.
5614          */
5615         if (external || dev->data->dev_started ||
5616             (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
5617              attr->priority == MLX5_FLOW_PRIO_RSVD)) {
5618                 ret = flow_drv_apply(dev, flow, error);
5619                 if (ret < 0)
5620                         goto error;
5621         }
5622         if (list) {
5623                 rte_spinlock_lock(&priv->flow_list_lock);
5624                 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5625                              flow, next);
5626                 rte_spinlock_unlock(&priv->flow_list_lock);
5627         }
5628         flow_rxq_flags_set(dev, flow);
5629         rte_free(translated_actions);
5630         tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5631         if (tunnel) {
5632                 flow->tunnel = 1;
5633                 flow->tunnel_id = tunnel->tunnel_id;
5634                 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5635                 mlx5_free(default_miss_ctx.queue);
5636         }
5637         mlx5_flow_pop_thread_workspace();
5638         return idx;
5639 error:
5640         MLX5_ASSERT(flow);
5641         ret = rte_errno; /* Save rte_errno before cleanup. */
5642         flow_mreg_del_copy_action(dev, flow);
5643         flow_drv_destroy(dev, flow);
5644         if (rss_desc->shared_rss)
5645                 __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
5646                         mlx5_ipool_get
5647                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
5648                         rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
5649         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5650         rte_errno = ret; /* Restore rte_errno. */
5653         mlx5_flow_pop_thread_workspace();
5654 error_before_hairpin_split:
5655         rte_free(translated_actions);
5656         return 0;
5657 }
5658
5659 /**
5660  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5661  * incoming packets to table 1.
5662  *
5663  * Other flow rules, requested for group n, will be created in
5664  * e-switch table n+1.
5665  * Jump action to e-switch group n will be created to group n+1.
5666  *
5667  * Used when working in switchdev mode, to utilise advantages of table 1
5668  * and above.
5669  *
5670  * @param dev
5671  *   Pointer to Ethernet device.
5672  *
5673  * @return
5674  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
5675  */
5676 struct rte_flow *
5677 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5678 {
5679         const struct rte_flow_attr attr = {
5680                 .group = 0,
5681                 .priority = 0,
5682                 .ingress = 1,
5683                 .egress = 0,
5684                 .transfer = 1,
5685         };
5686         const struct rte_flow_item pattern = {
5687                 .type = RTE_FLOW_ITEM_TYPE_END,
5688         };
5689         struct rte_flow_action_jump jump = {
5690                 .group = 1,
5691         };
5692         const struct rte_flow_action actions[] = {
5693                 {
5694                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
5695                         .conf = &jump,
5696                 },
5697                 {
5698                         .type = RTE_FLOW_ACTION_TYPE_END,
5699                 },
5700         };
5701         struct mlx5_priv *priv = dev->data->dev_private;
5702         struct rte_flow_error error;
5703
5704         return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5705                                                    &attr, &pattern,
5706                                                    actions, false, &error);
5707 }
5708
5709 /**
5710  * Validate a flow supported by the NIC.
5711  *
5712  * @see rte_flow_validate()
5713  * @see rte_flow_ops
5714  */
5715 int
5716 mlx5_flow_validate(struct rte_eth_dev *dev,
5717                    const struct rte_flow_attr *attr,
5718                    const struct rte_flow_item items[],
5719                    const struct rte_flow_action original_actions[],
5720                    struct rte_flow_error *error)
5721 {
5722         int hairpin_flow;
5723         struct mlx5_translated_shared_action
5724                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5725         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5726         const struct rte_flow_action *actions;
5727         struct rte_flow_action *translated_actions = NULL;
5728         int ret = flow_shared_actions_translate(dev, original_actions,
5729                                                 shared_actions,
5730                                                 &shared_actions_n,
5731                                                 &translated_actions, error);
5732
5733         if (ret)
5734                 return ret;
5735         actions = translated_actions ? translated_actions : original_actions;
5736         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5737         ret = flow_drv_validate(dev, attr, items, actions,
5738                                 true, hairpin_flow, error);
5739         rte_free(translated_actions);
5740         return ret;
5741 }
5742
5743 /**
5744  * Create a flow.
5745  *
5746  * @see rte_flow_create()
5747  * @see rte_flow_ops
5748  */
5749 struct rte_flow *
5750 mlx5_flow_create(struct rte_eth_dev *dev,
5751                  const struct rte_flow_attr *attr,
5752                  const struct rte_flow_item items[],
5753                  const struct rte_flow_action actions[],
5754                  struct rte_flow_error *error)
5755 {
5756         struct mlx5_priv *priv = dev->data->dev_private;
5757
5758         /*
5759          * If the device is not started yet, the application is not allowed
5760          * to create a flow. PMD default flows and traffic control flows
5761          * are not affected.
5762          */
5763         if (unlikely(!dev->data->dev_started)) {
5764                 DRV_LOG(DEBUG, "port %u is not started when "
5765                         "inserting a flow", dev->data->port_id);
5766                 rte_flow_error_set(error, ENODEV,
5767                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5768                                    NULL,
5769                                    "port not started");
5770                 return NULL;
5771         }
5772
5773         return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
5774                                   attr, items, actions, true, error);
5775 }
5776
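/*
 * Illustrative sketch (not part of the driver): minimal application-side
 * usage of the entry point above -- validate first, then create. The port
 * must already be started; error details in "error" are ignored here and
 * the "example_" name is hypothetical.
 */
static __rte_unused struct rte_flow *
example_create_queue_flow(struct rte_eth_dev *dev, uint16_t queue_id)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = queue_id };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        if (mlx5_flow_validate(dev, &attr, pattern, actions, &error) < 0)
                return NULL;
        return mlx5_flow_create(dev, &attr, pattern, actions, &error);
}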
5777 /**
5778  * Destroy a flow in a list.
5779  *
5780  * @param dev
5781  *   Pointer to Ethernet device.
5782  * @param list
5783  *   Pointer to the indexed flow list. If this parameter is NULL,
5784  *   there is no flow removal from the list. Note that since flows
5785  *   are added to the indexed list, the memory the list points to
5786  *   may change as flows are destroyed.
5787  * @param[in] flow_idx
5788  *   Index of flow to destroy.
5789  */
5790 static void
5791 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
5792                   uint32_t flow_idx)
5793 {
5794         struct mlx5_priv *priv = dev->data->dev_private;
5795         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
5796                                                [MLX5_IPOOL_RTE_FLOW], flow_idx);
5797
5798         if (!flow)
5799                 return;
5800         /*
5801          * Update RX queue flags only if port is started, otherwise it is
5802          * already clean.
5803          */
5804         if (dev->data->dev_started)
5805                 flow_rxq_flags_trim(dev, flow);
5806         flow_drv_destroy(dev, flow);
5807         if (list) {
5808                 rte_spinlock_lock(&priv->flow_list_lock);
5809                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
5810                              flow_idx, flow, next);
5811                 rte_spinlock_unlock(&priv->flow_list_lock);
5812         }
5813         if (flow->tunnel) {
5814                 struct mlx5_flow_tunnel *tunnel;
5815
5816                 tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
5817                 RTE_VERIFY(tunnel);
5818                 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
5819                         mlx5_flow_tunnel_free(dev, tunnel);
5820         }
5821         flow_mreg_del_copy_action(dev, flow);
5822         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
5823 }
5824
5825 /**
5826  * Destroy all flows.
5827  *
5828  * @param dev
5829  *   Pointer to Ethernet device.
5830  * @param list
5831  *   Pointer to the Indexed flow list.
5832  * @param active
5833  *   Whether the flush is called actively (before stopping the port).
5834  */
5835 void
5836 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
5837 {
5838         uint32_t num_flushed = 0;
5839
5840         while (*list) {
5841                 flow_list_destroy(dev, list, *list);
5842                 num_flushed++;
5843         }
5844         if (active) {
5845                 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
5846                         dev->data->port_id, num_flushed);
5847         }
5848 }
5849
5850 /**
5851  * Stop all default actions for flows.
5852  *
5853  * @param dev
5854  *   Pointer to Ethernet device.
5855  */
5856 void
5857 mlx5_flow_stop_default(struct rte_eth_dev *dev)
5858 {
5859         flow_mreg_del_default_copy_action(dev);
5860         flow_rxq_flags_clear(dev);
5861 }
5862
5863 /**
5864  * Start all default actions for flows.
5865  *
5866  * @param dev
5867  *   Pointer to Ethernet device.
5868  * @return
5869  *   0 on success, a negative errno value otherwise and rte_errno is set.
5870  */
5871 int
5872 mlx5_flow_start_default(struct rte_eth_dev *dev)
5873 {
5874         struct rte_flow_error error;
5875
5876         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
5877         return flow_mreg_add_default_copy_action(dev, &error);
5878 }
5879
5880 /**
5881  * Release key of thread specific flow workspace data.
5882  */
5883 void
5884 flow_release_workspace(void *data)
5885 {
5886         struct mlx5_flow_workspace *wks = data;
5887         struct mlx5_flow_workspace *next;
5888
5889         while (wks) {
5890                 next = wks->next;
5891                 free(wks->rss_desc.queue);
5892                 free(wks);
5893                 wks = next;
5894         }
5895 }
5896
5897 /**
5898  * Get thread specific current flow workspace.
5899  *
5900  * @return pointer to thread specific flow workspace data, NULL on error.
5901  */
5902 struct mlx5_flow_workspace*
5903 mlx5_flow_get_thread_workspace(void)
5904 {
5905         struct mlx5_flow_workspace *data;
5906
5907         data = mlx5_flow_os_get_specific_workspace();
5908         MLX5_ASSERT(data && data->inuse);
5909         if (!data || !data->inuse)
5910                 DRV_LOG(ERR, "flow workspace not initialized.");
5911         return data;
5912 }
5913
5914 /**
5915  * Allocate and init new flow workspace.
5916  *
5917  * @return pointer to flow workspace data, NULL on error.
5918  */
5919 static struct mlx5_flow_workspace*
5920 flow_alloc_thread_workspace(void)
5921 {
5922         struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
5923
5924         if (!data) {
5925                 DRV_LOG(ERR, "Failed to allocate flow workspace "
5926                         "memory.");
5927                 return NULL;
5928         }
5929         data->rss_desc.queue = calloc(1,
5930                         sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5931         if (!data->rss_desc.queue)
5932                 goto err;
5933         data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
5934         return data;
5935 err:
5936         free(data->rss_desc.queue);
5938         free(data);
5939         return NULL;
5940 }
5941
5942 /**
5943  * Get new thread specific flow workspace.
5944  *
5945  * If the current workspace is in use, create a new one and set it as current.
5946  *
5947  * @return pointer to thread specific flow workspace data, NULL on error.
5948  */
5949 static struct mlx5_flow_workspace*
5950 mlx5_flow_push_thread_workspace(void)
5951 {
5952         struct mlx5_flow_workspace *curr;
5953         struct mlx5_flow_workspace *data;
5954
5955         curr = mlx5_flow_os_get_specific_workspace();
5956         if (!curr) {
5957                 data = flow_alloc_thread_workspace();
5958                 if (!data)
5959                         return NULL;
5960         } else if (!curr->inuse) {
5961                 data = curr;
5962         } else if (curr->next) {
5963                 data = curr->next;
5964         } else {
5965                 data = flow_alloc_thread_workspace();
5966                 if (!data)
5967                         return NULL;
5968                 curr->next = data;
5969                 data->prev = curr;
5970         }
5971         data->inuse = 1;
5972         data->flow_idx = 0;
5973         /* Set as current workspace */
5974         if (mlx5_flow_os_set_specific_workspace(data))
5975                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
5976         return data;
5977 }
5978
5979 /**
5980  * Close current thread specific flow workspace.
5981  *
5982  * If a previous workspace is available, set it as current.
5985  */
5986 static void
5987 mlx5_flow_pop_thread_workspace(void)
5988 {
5989         struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
5990
5991         if (!data)
5992                 return;
5993         if (!data->inuse) {
5994                 DRV_LOG(ERR, "Failed to close unused flow workspace.");
5995                 return;
5996         }
5997         data->inuse = 0;
5998         if (!data->prev)
5999                 return;
6000         if (mlx5_flow_os_set_specific_workspace(data->prev))
6001                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
6002 }
6003
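/*
 * Illustrative sketch (not part of the driver): the push/pop pair above
 * forms a small per-thread stack, so re-entrant flow creation (e.g. a
 * tunnel default-miss rule created while another rule is in flight) gets
 * its own workspace and restores the previous one when done.
 */
static __rte_unused void
example_workspace_nesting(void)
{
        struct mlx5_flow_workspace *outer = mlx5_flow_push_thread_workspace();
        struct mlx5_flow_workspace *inner = mlx5_flow_push_thread_workspace();

        /* "inner" is now the thread's current workspace. */
        MLX5_ASSERT(inner == mlx5_flow_get_thread_workspace());
        mlx5_flow_pop_thread_workspace(); /* back to "outer" */
        MLX5_ASSERT(outer == mlx5_flow_get_thread_workspace());
        mlx5_flow_pop_thread_workspace();
        (void)outer;
        (void)inner;
}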
6004 /**
6005  * Verify the flow list is empty
6006  *
6007  * @param dev
6008  *  Pointer to Ethernet device.
6009  *
6010  * @return the number of flows not released.
6011  */
6012 int
6013 mlx5_flow_verify(struct rte_eth_dev *dev)
6014 {
6015         struct mlx5_priv *priv = dev->data->dev_private;
6016         struct rte_flow *flow;
6017         uint32_t idx;
6018         int ret = 0;
6019
6020         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6021                       flow, next) {
6022                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
6023                         dev->data->port_id, (void *)flow);
6024                 ++ret;
6025         }
6026         return ret;
6027 }
6028
6029 /**
6030  * Enable default hairpin egress flow.
6031  *
6032  * @param dev
6033  *   Pointer to Ethernet device.
6034  * @param queue
6035  *   The queue index.
6036  *
6037  * @return
6038  *   0 on success, a negative errno value otherwise and rte_errno is set.
6039  */
6040 int
6041 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6042                             uint32_t queue)
6043 {
6044         struct mlx5_priv *priv = dev->data->dev_private;
6045         const struct rte_flow_attr attr = {
6046                 .egress = 1,
6047                 .priority = 0,
6048         };
6049         struct mlx5_rte_flow_item_tx_queue queue_spec = {
6050                 .queue = queue,
6051         };
6052         struct mlx5_rte_flow_item_tx_queue queue_mask = {
6053                 .queue = UINT32_MAX,
6054         };
6055         struct rte_flow_item items[] = {
6056                 {
6057                         .type = (enum rte_flow_item_type)
6058                                 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6059                         .spec = &queue_spec,
6060                         .last = NULL,
6061                         .mask = &queue_mask,
6062                 },
6063                 {
6064                         .type = RTE_FLOW_ITEM_TYPE_END,
6065                 },
6066         };
6067         struct rte_flow_action_jump jump = {
6068                 .group = MLX5_HAIRPIN_TX_TABLE,
6069         };
6070         struct rte_flow_action actions[2];
6071         uint32_t flow_idx;
6072         struct rte_flow_error error;
6073
6074         actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6075         actions[0].conf = &jump;
6076         actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6077         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6078                                 &attr, items, actions, false, &error);
6079         if (!flow_idx) {
6080                 DRV_LOG(DEBUG,
6081                         "Failed to create ctrl flow: rte_errno(%d),"
6082                         " type(%d), message(%s)",
6083                         rte_errno, error.type,
6084                         error.message ? error.message : " (no stated reason)");
6085                 return -rte_errno;
6086         }
6087         return 0;
6088 }
6089
6090 /**
6091  * Enable a control flow configured from the control plane.
6092  *
6093  * @param dev
6094  *   Pointer to Ethernet device.
6095  * @param eth_spec
6096  *   An Ethernet flow spec to apply.
6097  * @param eth_mask
6098  *   An Ethernet flow mask to apply.
6099  * @param vlan_spec
6100  *   A VLAN flow spec to apply.
6101  * @param vlan_mask
6102  *   A VLAN flow mask to apply.
6103  *
6104  * @return
6105  *   0 on success, a negative errno value otherwise and rte_errno is set.
6106  */
6107 int
6108 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6109                     struct rte_flow_item_eth *eth_spec,
6110                     struct rte_flow_item_eth *eth_mask,
6111                     struct rte_flow_item_vlan *vlan_spec,
6112                     struct rte_flow_item_vlan *vlan_mask)
6113 {
6114         struct mlx5_priv *priv = dev->data->dev_private;
6115         const struct rte_flow_attr attr = {
6116                 .ingress = 1,
6117                 .priority = MLX5_FLOW_PRIO_RSVD,
6118         };
6119         struct rte_flow_item items[] = {
6120                 {
6121                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6122                         .spec = eth_spec,
6123                         .last = NULL,
6124                         .mask = eth_mask,
6125                 },
6126                 {
6127                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6128                                               RTE_FLOW_ITEM_TYPE_END,
6129                         .spec = vlan_spec,
6130                         .last = NULL,
6131                         .mask = vlan_mask,
6132                 },
6133                 {
6134                         .type = RTE_FLOW_ITEM_TYPE_END,
6135                 },
6136         };
6137         uint16_t queue[priv->reta_idx_n];
6138         struct rte_flow_action_rss action_rss = {
6139                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6140                 .level = 0,
6141                 .types = priv->rss_conf.rss_hf,
6142                 .key_len = priv->rss_conf.rss_key_len,
6143                 .queue_num = priv->reta_idx_n,
6144                 .key = priv->rss_conf.rss_key,
6145                 .queue = queue,
6146         };
6147         struct rte_flow_action actions[] = {
6148                 {
6149                         .type = RTE_FLOW_ACTION_TYPE_RSS,
6150                         .conf = &action_rss,
6151                 },
6152                 {
6153                         .type = RTE_FLOW_ACTION_TYPE_END,
6154                 },
6155         };
6156         uint32_t flow_idx;
6157         struct rte_flow_error error;
6158         unsigned int i;
6159
6160         if (!priv->reta_idx_n || !priv->rxqs_n)
6161                 return 0;
6163         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6164                 action_rss.types = 0;
6165         for (i = 0; i != priv->reta_idx_n; ++i)
6166                 queue[i] = (*priv->reta_idx)[i];
6167         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6168                                 &attr, items, actions, false, &error);
6169         if (!flow_idx)
6170                 return -rte_errno;
6171         return 0;
6172 }
6173
6174 /**
6175  * Enable a control flow configured from the control plane.
6176  *
6177  * @param dev
6178  *   Pointer to Ethernet device.
6179  * @param eth_spec
6180  *   An Ethernet flow spec to apply.
6181  * @param eth_mask
6182  *   An Ethernet flow mask to apply.
6183  *
6184  * @return
6185  *   0 on success, a negative errno value otherwise and rte_errno is set.
6186  */
6187 int
6188 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6189                struct rte_flow_item_eth *eth_spec,
6190                struct rte_flow_item_eth *eth_mask)
6191 {
6192         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6193 }
6194
6195 /**
6196  * Create a default miss flow rule matching LACP traffic.
6197  *
6198  * @param dev
6199  *   Pointer to Ethernet device.
6202  *
6203  * @return
6204  *   0 on success, a negative errno value otherwise and rte_errno is set.
6205  */
6206 int
6207 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6208 {
6209         struct mlx5_priv *priv = dev->data->dev_private;
6210         /*
6211          * LACP matching uses only the ether type, since a multicast dst
6212          * MAC causes the kernel to give this flow low priority.
6213          */
6214         static const struct rte_flow_item_eth lacp_spec = {
6215                 .type = RTE_BE16(0x8809),
6216         };
6217         static const struct rte_flow_item_eth lacp_mask = {
6218                 .type = 0xffff,
6219         };
6220         const struct rte_flow_attr attr = {
6221                 .ingress = 1,
6222         };
6223         struct rte_flow_item items[] = {
6224                 {
6225                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6226                         .spec = &lacp_spec,
6227                         .mask = &lacp_mask,
6228                 },
6229                 {
6230                         .type = RTE_FLOW_ITEM_TYPE_END,
6231                 },
6232         };
6233         struct rte_flow_action actions[] = {
6234                 {
6235                         .type = (enum rte_flow_action_type)
6236                                 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6237                 },
6238                 {
6239                         .type = RTE_FLOW_ACTION_TYPE_END,
6240                 },
6241         };
6242         struct rte_flow_error error;
6243         uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6244                                 &attr, items, actions, false, &error);
6245
6246         if (!flow_idx)
6247                 return -rte_errno;
6248         return 0;
6249 }
6250
6251 /**
6252  * Destroy a flow.
6253  *
6254  * @see rte_flow_destroy()
6255  * @see rte_flow_ops
6256  */
6257 int
6258 mlx5_flow_destroy(struct rte_eth_dev *dev,
6259                   struct rte_flow *flow,
6260                   struct rte_flow_error *error __rte_unused)
6261 {
6262         struct mlx5_priv *priv = dev->data->dev_private;
6263
6264         flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6265         return 0;
6266 }
6267
6268 /**
6269  * Destroy all flows.
6270  *
6271  * @see rte_flow_flush()
6272  * @see rte_flow_ops
6273  */
6274 int
6275 mlx5_flow_flush(struct rte_eth_dev *dev,
6276                 struct rte_flow_error *error __rte_unused)
6277 {
6278         struct mlx5_priv *priv = dev->data->dev_private;
6279
6280         mlx5_flow_list_flush(dev, &priv->flows, false);
6281         return 0;
6282 }
6283
6284 /**
6285  * Isolated mode.
6286  *
6287  * @see rte_flow_isolate()
6288  * @see rte_flow_ops
6289  */
6290 int
6291 mlx5_flow_isolate(struct rte_eth_dev *dev,
6292                   int enable,
6293                   struct rte_flow_error *error)
6294 {
6295         struct mlx5_priv *priv = dev->data->dev_private;
6296
6297         if (dev->data->dev_started) {
6298                 rte_flow_error_set(error, EBUSY,
6299                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6300                                    NULL,
6301                                    "port must be stopped first");
6302                 return -rte_errno;
6303         }
6304         priv->isolated = !!enable;
6305         if (enable)
6306                 dev->dev_ops = &mlx5_dev_ops_isolate;
6307         else
6308                 dev->dev_ops = &mlx5_dev_ops;
6309
6310         dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6311         dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6312
6313         return 0;
6314 }
6315
6316 /**
6317  * Query a flow.
6318  *
6319  * @see rte_flow_query()
6320  * @see rte_flow_ops
6321  */
6322 static int
6323 flow_drv_query(struct rte_eth_dev *dev,
6324                uint32_t flow_idx,
6325                const struct rte_flow_action *actions,
6326                void *data,
6327                struct rte_flow_error *error)
6328 {
6329         struct mlx5_priv *priv = dev->data->dev_private;
6330         const struct mlx5_flow_driver_ops *fops;
6331         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6332                                                [MLX5_IPOOL_RTE_FLOW],
6333                                                flow_idx);
6334         enum mlx5_flow_drv_type ftype;
6335
6336         if (!flow) {
6337                 return rte_flow_error_set(error, ENOENT,
6338                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6339                           NULL,
6340                           "invalid flow handle");
6341         }
6342         ftype = flow->drv_type;
6343         MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6344         fops = flow_get_drv_ops(ftype);
6345
6346         return fops->query(dev, flow, actions, data, error);
6347 }
6348
6349 /**
6350  * Query a flow.
6351  *
6352  * @see rte_flow_query()
6353  * @see rte_flow_ops
6354  */
6355 int
6356 mlx5_flow_query(struct rte_eth_dev *dev,
6357                 struct rte_flow *flow,
6358                 const struct rte_flow_action *actions,
6359                 void *data,
6360                 struct rte_flow_error *error)
6361 {
6362         int ret;
6363
6364         ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6365                              error);
6366         if (ret < 0)
6367                 return ret;
6368         return 0;
6369 }
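
/*
 * Editor's illustrative sketch, not part of the driver: querying a rule
 * that carries a COUNT action through the public API, which lands in
 * mlx5_flow_query() above. The helper name is hypothetical.
 */
static __rte_unused int
example_query_flow_count(uint16_t port_id, struct rte_flow *flow,
                         uint64_t *pkts, uint64_t *bytes)
{
        struct rte_flow_query_count count = { .reset = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;
        int ret;

        ret = rte_flow_query(port_id, flow, actions, &count, &err);
        if (ret)
                return ret;
        *pkts = count.hits;
        *bytes = count.bytes;
        return 0;
}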
6370
6371 /**
6372  * Manage filter operations.
6373  *
6374  * @param dev
6375  *   Pointer to Ethernet device structure.
6376  * @param filter_type
6377  *   Filter type.
6378  * @param filter_op
6379  *   Operation to perform.
6380  * @param arg
6381  *   Pointer to operation-specific structure.
6382  *
6383  * @return
6384  *   0 on success, a negative errno value otherwise and rte_errno is set.
6385  */
6386 int
6387 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
6388                      enum rte_filter_type filter_type,
6389                      enum rte_filter_op filter_op,
6390                      void *arg)
6391 {
6392         switch (filter_type) {
6393         case RTE_ETH_FILTER_GENERIC:
6394                 if (filter_op != RTE_ETH_FILTER_GET) {
6395                         rte_errno = EINVAL;
6396                         return -rte_errno;
6397                 }
6398                 *(const void **)arg = &mlx5_flow_ops;
6399                 return 0;
6400         default:
6401                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
6402                         dev->data->port_id, filter_type);
6403                 rte_errno = ENOTSUP;
6404                 return -rte_errno;
6405         }
6406         return 0;
6407 }
6408
6409 /**
6410  * Create the needed meter and suffix tables.
6411  *
6412  * @param[in] dev
6413  *   Pointer to Ethernet device.
6414  * @param[in] fm
6415  *   Pointer to the flow meter.
6416  *
6417  * @return
6418  *   Pointer to table set on success, NULL otherwise.
6419  */
6420 struct mlx5_meter_domains_infos *
6421 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
6422                           const struct mlx5_flow_meter *fm)
6423 {
6424         const struct mlx5_flow_driver_ops *fops;
6425
6426         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6427         return fops->create_mtr_tbls(dev, fm);
6428 }
6429
6430 /**
6431  * Destroy the meter table set.
6432  *
6433  * @param[in] dev
6434  *   Pointer to Ethernet device.
6435  * @param[in] tbl
6436  *   Pointer to the meter table set.
6437  *
6438  * @return
6439  *   0 on success.
6440  */
6441 int
6442 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
6443                            struct mlx5_meter_domains_infos *tbls)
6444 {
6445         const struct mlx5_flow_driver_ops *fops;
6446
6447         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6448         return fops->destroy_mtr_tbls(dev, tbls);
6449 }
6450
6451 /**
6452  * Create policer rules.
6453  *
6454  * @param[in] dev
6455  *   Pointer to Ethernet device.
6456  * @param[in] fm
6457  *   Pointer to flow meter structure.
6458  * @param[in] attr
6459  *   Pointer to flow attributes.
6460  *
6461  * @return
6462  *   0 on success, -1 otherwise.
6463  */
6464 int
6465 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
6466                                struct mlx5_flow_meter *fm,
6467                                const struct rte_flow_attr *attr)
6468 {
6469         const struct mlx5_flow_driver_ops *fops;
6470
6471         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6472         return fops->create_policer_rules(dev, fm, attr);
6473 }
6474
6475 /**
6476  * Destroy policer rules.
6477  *
6478  * @param[in] fm
6479  *   Pointer to flow meter structure.
6480  * @param[in] attr
6481  *   Pointer to flow attributes.
6482  *
6483  * @return
6484  *   0 on success, -1 otherwise.
6485  */
6486 int
6487 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
6488                                 struct mlx5_flow_meter *fm,
6489                                 const struct rte_flow_attr *attr)
6490 {
6491         const struct mlx5_flow_driver_ops *fops;
6492
6493         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6494         return fops->destroy_policer_rules(dev, fm, attr);
6495 }
6496
6497 /**
6498  * Allocate a counter.
6499  *
6500  * @param[in] dev
6501  *   Pointer to Ethernet device structure.
6502  *
6503  * @return
6504  *   Index of the allocated counter on success, 0 otherwise.
6505  */
6506 uint32_t
6507 mlx5_counter_alloc(struct rte_eth_dev *dev)
6508 {
6509         const struct mlx5_flow_driver_ops *fops;
6510         struct rte_flow_attr attr = { .transfer = 0 };
6511
6512         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6513                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6514                 return fops->counter_alloc(dev);
6515         }
6516         DRV_LOG(ERR,
6517                 "port %u counter allocate is not supported.",
6518                  dev->data->port_id);
6519         return 0;
6520 }
6521
6522 /**
6523  * Free a counter.
6524  *
6525  * @param[in] dev
6526  *   Pointer to Ethernet device structure.
6527  * @param[in] cnt
6528  *   Index of the counter to be freed.
6529  */
6530 void
6531 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
6532 {
6533         const struct mlx5_flow_driver_ops *fops;
6534         struct rte_flow_attr attr = { .transfer = 0 };
6535
6536         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6537                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6538                 fops->counter_free(dev, cnt);
6539                 return;
6540         }
6541         DRV_LOG(ERR,
6542                 "port %u counter free is not supported.",
6543                  dev->data->port_id);
6544 }
6545
6546 /**
6547  * Query counter statistics.
6548  *
6549  * @param[in] dev
6550  *   Pointer to Ethernet device structure.
6551  * @param[in] cnt
6552  *   Index to counter to query.
6553  * @param[in] clear
6554  *   Set to clear counter statistics.
6555  * @param[out] pkts
6556  *   Where to store the number of packets that hit the counter.
6557  * @param[out] bytes
6558  *   Where to store the number of bytes that hit the counter.
6559  *
6560  * @return
6561  *   0 on success, a negative errno value otherwise.
6562  */
6563 int
6564 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
6565                    bool clear, uint64_t *pkts, uint64_t *bytes)
6566 {
6567         const struct mlx5_flow_driver_ops *fops;
6568         struct rte_flow_attr attr = { .transfer = 0 };
6569
6570         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6571                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6572                 return fops->counter_query(dev, cnt, clear, pkts, bytes);
6573         }
6574         DRV_LOG(ERR,
6575                 "port %u counter query is not supported.",
6576                  dev->data->port_id);
6577         return -ENOTSUP;
6578 }
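
/*
 * Editor's illustrative sketch, not part of the driver: the three wrappers
 * above form a minimal counter life cycle when the DV driver is active.
 * The helper name is hypothetical.
 */
static __rte_unused int
example_counter_life_cycle(struct rte_eth_dev *dev)
{
        uint64_t pkts = 0, bytes = 0;
        uint32_t cnt = mlx5_counter_alloc(dev);
        int ret;

        if (!cnt)
                return -1; /* Allocation failed or not supported. */
        /* Read the statistics and clear the hardware counter. */
        ret = mlx5_counter_query(dev, cnt, true, &pkts, &bytes);
        mlx5_counter_free(dev, cnt);
        return ret;
}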
6579
6580 /**
6581  * Allocate new memory for the counter values, wrapped by all the needed
6582  * management structures.
6583  *
6584  * @param[in] sh
6585  *   Pointer to mlx5_dev_ctx_shared object.
6586  *
6587  * @return
6588  *   0 on success, a negative errno value otherwise.
6589  */
6590 static int
6591 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
6592 {
6593         struct mlx5_devx_mkey_attr mkey_attr;
6594         struct mlx5_counter_stats_mem_mng *mem_mng;
6595         volatile struct flow_counter_stats *raw_data;
6596         int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
6597         int size = (sizeof(struct flow_counter_stats) *
6598                         MLX5_COUNTERS_PER_POOL +
6599                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
6600                         sizeof(struct mlx5_counter_stats_mem_mng);
6601         size_t pgsize = rte_mem_page_size();
6602         uint8_t *mem;
6603         int i;
6604
6605         if (pgsize == (size_t)-1) {
6606                 DRV_LOG(ERR, "Failed to get mem page size");
6607                 rte_errno = ENOMEM;
6608                 return -ENOMEM;
6609         }
6610         mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
6611         if (!mem) {
6612                 rte_errno = ENOMEM;
6613                 return -ENOMEM;
6614         }
6615         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
6616         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
6617         mem_mng->umem = mlx5_os_umem_reg(sh->ctx, mem, size,
6618                                                  IBV_ACCESS_LOCAL_WRITE);
6619         if (!mem_mng->umem) {
6620                 rte_errno = errno;
6621                 mlx5_free(mem);
6622                 return -rte_errno;
6623         }
6624         mkey_attr.addr = (uintptr_t)mem;
6625         mkey_attr.size = size;
6626         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
6627         mkey_attr.pd = sh->pdn;
6628         mkey_attr.log_entity_size = 0;
6629         mkey_attr.pg_access = 0;
6630         mkey_attr.klm_array = NULL;
6631         mkey_attr.klm_num = 0;
6632         mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
6633         mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
6634         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
6635         if (!mem_mng->dm) {
6636                 mlx5_os_umem_dereg(mem_mng->umem);
6637                 rte_errno = errno;
6638                 mlx5_free(mem);
6639                 return -rte_errno;
6640         }
6641         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
6642         raw_data = (volatile struct flow_counter_stats *)mem;
6643         for (i = 0; i < raws_n; ++i) {
6644                 mem_mng->raws[i].mem_mng = mem_mng;
6645                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
6646         }
6647         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
6648                 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
6649                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
6650                                  next);
6651         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
6652         sh->cmng.mem_mng = mem_mng;
6653         return 0;
6654 }
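
/*
 * Editor's note - memory layout produced by the function above, derived
 * from the code for easier review:
 *
 *   mem: | raws_n * MLX5_COUNTERS_PER_POOL     | <- raw counter values,
 *        |   struct flow_counter_stats entries |    registered as one UMEM
 *        | raws_n * mlx5_counter_stats_raw     | <- per-batch descriptors
 *        | mlx5_counter_stats_mem_mng          | <- management trailer
 *
 * The last MLX5_MAX_PENDING_QUERIES descriptors are parked on
 * cmng.free_stat_raws to serve in-flight asynchronous queries.
 */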
6655
6656 /**
6657  * Set the statistic memory to the new counter pool.
6658  *
6659  * @param[in] sh
6660  *   Pointer to mlx5_dev_ctx_shared object.
6661  * @param[in] pool
6662  *   Pointer to the pool to set the statistic memory.
6663  *
6664  * @return
6665  *   0 on success, a negative errno value otherwise.
6666  */
6667 static int
6668 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
6669                                struct mlx5_flow_counter_pool *pool)
6670 {
6671         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6672         /* Resize the statistic memory once it is used up. */
6673         if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
6674             mlx5_flow_create_counter_stat_mem_mng(sh)) {
6675                 DRV_LOG(ERR, "Cannot resize counter stat mem.");
6676                 return -1;
6677         }
6678         rte_spinlock_lock(&pool->sl);
6679         pool->raw = cmng->mem_mng->raws + pool->index %
6680                     MLX5_CNT_CONTAINER_RESIZE;
6681         rte_spinlock_unlock(&pool->sl);
6682         pool->raw_hw = NULL;
6683         return 0;
6684 }
6685
6686 #define MLX5_POOL_QUERY_FREQ_US 1000000
6687
6688 /**
6689  * Schedule the periodic procedure that triggers asynchronous batch queries
6690  * for all the counter pools.
6691  *
6692  * @param[in] sh
6693  *   Pointer to mlx5_dev_ctx_shared object.
6694  */
6695 void
6696 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
6697 {
6698         uint32_t pools_n, us;
6699
6700         pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
6701         us = MLX5_POOL_QUERY_FREQ_US / pools_n;
6702         DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
6703         if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
6704                 sh->cmng.query_thread_on = 0;
6705                 DRV_LOG(ERR, "Cannot reinitialize query alarm");
6706         } else {
6707                 sh->cmng.query_thread_on = 1;
6708         }
6709 }
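
/*
 * Editor's note (derived from the code above): the query budget is spread
 * evenly across the valid pools. For example, with
 * MLX5_POOL_QUERY_FREQ_US = 1000000 and 16 valid pools the alarm fires
 * every 62500 us, so each pool is still queried about once per second.
 */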
6710
6711 /**
6712  * The periodic procedure for triggering asynchronous batch queries for all the
6713  * counter pools. This function is expected to be called from the host thread.
6714  *
6715  * @param[in] arg
6716  *   The parameter for the alarm process.
6717  */
6718 void
6719 mlx5_flow_query_alarm(void *arg)
6720 {
6721         struct mlx5_dev_ctx_shared *sh = arg;
6722         int ret;
6723         uint16_t pool_index = sh->cmng.pool_index;
6724         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6725         struct mlx5_flow_counter_pool *pool;
6726         uint16_t n_valid;
6727
6728         if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
6729                 goto set_alarm;
6730         rte_spinlock_lock(&cmng->pool_update_sl);
6731         pool = cmng->pools[pool_index];
6732         n_valid = cmng->n_valid;
6733         rte_spinlock_unlock(&cmng->pool_update_sl);
6734         /* Set the statistic memory to the newly created pool. */
6735         if ((!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool)))
6736                 goto set_alarm;
6737         if (pool->raw_hw)
6738                 /* There is a pool query in progress. */
6739                 goto set_alarm;
6740         pool->raw_hw =
6741                 LIST_FIRST(&sh->cmng.free_stat_raws);
6742         if (!pool->raw_hw)
6743                 /* No free counter statistics raw memory. */
6744                 goto set_alarm;
6745         /*
6746          * Identify the counters released between the query trigger and the
6747          * query handler more efficiently. Counters released in this gap
6748          * period must wait for a new query round, as newly arrived packets
6749          * will not have been taken into account yet.
6750          */
6751         pool->query_gen++;
6752         ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
6753                                                MLX5_COUNTERS_PER_POOL,
6754                                                NULL, NULL,
6755                                                pool->raw_hw->mem_mng->dm->id,
6756                                                (void *)(uintptr_t)
6757                                                pool->raw_hw->data,
6758                                                sh->devx_comp,
6759                                                (uint64_t)(uintptr_t)pool);
6760         if (ret) {
6761                 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
6762                         " %d", pool->min_dcs->id);
6763                 pool->raw_hw = NULL;
6764                 goto set_alarm;
6765         }
6766         LIST_REMOVE(pool->raw_hw, next);
6767         sh->cmng.pending_queries++;
6768         pool_index++;
6769         if (pool_index >= n_valid)
6770                 pool_index = 0;
6771 set_alarm:
6772         sh->cmng.pool_index = pool_index;
6773         mlx5_set_query_alarm(sh);
6774 }
6775
6776 /**
6777  * Check for newly aged flows in the counter pool and raise the age event.
6778  *
6779  * @param[in] sh
6780  *   Pointer to mlx5_dev_ctx_shared object.
6781  * @param[in] pool
6782  *   Pointer to the current counter pool.
6783  */
6784 static void
6785 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
6786                    struct mlx5_flow_counter_pool *pool)
6787 {
6788         struct mlx5_priv *priv;
6789         struct mlx5_flow_counter *cnt;
6790         struct mlx5_age_info *age_info;
6791         struct mlx5_age_param *age_param;
6792         struct mlx5_counter_stats_raw *cur = pool->raw_hw;
6793         struct mlx5_counter_stats_raw *prev = pool->raw;
6794         const uint64_t curr_time = MLX5_CURR_TIME_SEC;
6795         const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
6796         uint16_t expected = AGE_CANDIDATE;
6797         uint32_t i;
6798
6799         pool->time_of_last_age_check = curr_time;
6800         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
6801                 cnt = MLX5_POOL_GET_CNT(pool, i);
6802                 age_param = MLX5_CNT_TO_AGE(cnt);
6803                 if (__atomic_load_n(&age_param->state,
6804                                     __ATOMIC_RELAXED) != AGE_CANDIDATE)
6805                         continue;
6806                 if (cur->data[i].hits != prev->data[i].hits) {
6807                         __atomic_store_n(&age_param->sec_since_last_hit, 0,
6808                                          __ATOMIC_RELAXED);
6809                         continue;
6810                 }
6811                 if (__atomic_add_fetch(&age_param->sec_since_last_hit,
6812                                        time_delta,
6813                                        __ATOMIC_RELAXED) <= age_param->timeout)
6814                         continue;
6815                 /*
6816                  * Hold the lock first; otherwise, if the counter is
6817                  * released between setting the AGE_TMOUT state and the
6818                  * tailq operation, the release procedure may delete a
6819                  * non-existent tailq node.
6820                  */
6821                 priv = rte_eth_devices[age_param->port_id].data->dev_private;
6822                 age_info = GET_PORT_AGE_INFO(priv);
6823                 rte_spinlock_lock(&age_info->aged_sl);
6824                 if (__atomic_compare_exchange_n(&age_param->state, &expected,
6825                                                 AGE_TMOUT, false,
6826                                                 __ATOMIC_RELAXED,
6827                                                 __ATOMIC_RELAXED)) {
6828                         TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
6829                         MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
6830                 }
6831                 rte_spinlock_unlock(&age_info->aged_sl);
6832         }
6833         mlx5_age_event_prepare(sh);
6834 }
6835
6836 /**
6837  * Handler for the HW response carrying the ready values of an asynchronous
6838  * batch query. This function is expected to be called from the host thread.
6839  *
6840  * @param[in] sh
6841  *   The pointer to the shared device context.
6842  * @param[in] async_id
6843  *   The Devx async ID.
6844  * @param[in] status
6845  *   The status of the completion.
6846  */
6847 void
6848 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
6849                                   uint64_t async_id, int status)
6850 {
6851         struct mlx5_flow_counter_pool *pool =
6852                 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
6853         struct mlx5_counter_stats_raw *raw_to_free;
6854         uint8_t query_gen = pool->query_gen ^ 1;
6855         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6856         enum mlx5_counter_type cnt_type =
6857                 pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6858                                 MLX5_COUNTER_TYPE_ORIGIN;
6859
6860         if (unlikely(status)) {
6861                 raw_to_free = pool->raw_hw;
6862         } else {
6863                 raw_to_free = pool->raw;
6864                 if (pool->is_aged)
6865                         mlx5_flow_aging_check(sh, pool);
6866                 rte_spinlock_lock(&pool->sl);
6867                 pool->raw = pool->raw_hw;
6868                 rte_spinlock_unlock(&pool->sl);
6869                 /* Be sure the new raw counters data is updated in memory. */
6870                 rte_io_wmb();
6871                 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
6872                         rte_spinlock_lock(&cmng->csl[cnt_type]);
6873                         TAILQ_CONCAT(&cmng->counters[cnt_type],
6874                                      &pool->counters[query_gen], next);
6875                         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6876                 }
6877         }
6878         LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
6879         pool->raw_hw = NULL;
6880         sh->cmng.pending_queries--;
6881 }
6882
6883 static int
6884 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
6885                     const struct flow_grp_info *grp_info,
6886                     struct rte_flow_error *error)
6887 {
6888         if (grp_info->transfer && grp_info->external &&
6889             grp_info->fdb_def_rule) {
6890                 if (group == UINT32_MAX)
6891                         return rte_flow_error_set
6892                                                 (error, EINVAL,
6893                                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6894                                                  NULL,
6895                                                  "group index not supported");
6896                 *table = group + 1;
6897         } else {
6898                 *table = group;
6899         }
6900         DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
6901         return 0;
6902 }
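
/*
 * Editor's worked example (derived from the code above): for an external
 * transfer rule created while the FDB default rule is active, group N maps
 * to table N + 1 because table 0 is reserved for the default FDB rule;
 * group UINT32_MAX is rejected since it cannot be shifted. In every other
 * case the group index is used as the table ID verbatim.
 */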
6903
6904 /**
6905  * Translate the rte_flow group index to HW table value.
6906  *
6907  * If tunnel offload is disabled, all group IDs are converted to flow
6908  * table IDs using the standard method.
6909  * If tunnel offload is enabled, a group ID can be converted using either
6910  * the standard or the tunnel conversion method. The conversion method
6911  * selection depends on flags in the `grp_info` parameter:
6912  * - Internal (grp_info.external == 0) groups are converted using the
6913  *   standard method.
6914  * - Group IDs in JUMP actions are converted with the tunnel method.
6915  * - Conversion of the group ID in a rule attribute depends on the rule
6916  *   type and the group ID value:
6917  *   ** a non-zero group attribute is converted with the tunnel method
6918  *   ** a zero group attribute in a non-tunnel rule is converted using the
6919  *      standard method - there is only one root table
6920  *   ** a zero group attribute in a tunnel steering rule is converted with
6921  *      the standard method - single root table
6922  *   ** a zero group attribute in a tunnel match rule is a special OvS
6923  *      case: that value is used for portability reasons. Such a group
6924  *      ID is converted with the tunnel conversion method.
6925  *
6926  * @param[in] dev
6927  *   Port device
6928  * @param[in] tunnel
6929  *   PMD tunnel offload object
6930  * @param[in] group
6931  *   rte_flow group index value.
6932  * @param[out] table
6933  *   HW table value.
6934  * @param[in] grp_info
6935  *   flags used for conversion
6936  * @param[out] error
6937  *   Pointer to error structure.
6938  *
6939  * @return
6940  *   0 on success, a negative errno value otherwise and rte_errno is set.
6941  */
6942 int
6943 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
6944                          const struct mlx5_flow_tunnel *tunnel,
6945                          uint32_t group, uint32_t *table,
6946                          const struct flow_grp_info *grp_info,
6947                          struct rte_flow_error *error)
6948 {
6949         int ret;
6950         bool standard_translation;
6951
6952         if (!grp_info->skip_scale && grp_info->external &&
6953             group < MLX5_MAX_TABLES_EXTERNAL)
6954                 group *= MLX5_FLOW_TABLE_FACTOR;
6955         if (is_tunnel_offload_active(dev)) {
6956                 standard_translation = !grp_info->external ||
6957                                         grp_info->std_tbl_fix;
6958         } else {
6959                 standard_translation = true;
6960         }
6961         DRV_LOG(DEBUG,
6962                 "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
6963                 dev->data->port_id, group, grp_info->transfer,
6964                 grp_info->external, grp_info->fdb_def_rule,
6965                 standard_translation ? "STANDARD" : "TUNNEL");
6966         if (standard_translation)
6967                 ret = flow_group_to_table(dev->data->port_id, group, table,
6968                                           grp_info, error);
6969         else
6970                 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
6971                                                       table, error);
6972
6973         return ret;
6974 }
6975
6976 /**
6977  * Discover availability of metadata reg_c's.
6978  *
6979  * Iteratively use test flows to check availability.
6980  *
6981  * @param[in] dev
6982  *   Pointer to the Ethernet device structure.
6983  *
6984  * @return
6985  *   0 on success, a negative errno value otherwise and rte_errno is set.
6986  */
6987 int
6988 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
6989 {
6990         struct mlx5_priv *priv = dev->data->dev_private;
6991         struct mlx5_dev_config *config = &priv->config;
6992         enum modify_reg idx;
6993         int n = 0;
6994
6995         /* reg_c[0] and reg_c[1] are reserved. */
6996         config->flow_mreg_c[n++] = REG_C_0;
6997         config->flow_mreg_c[n++] = REG_C_1;
6998         /* Discover availability of other reg_c's. */
6999         for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7000                 struct rte_flow_attr attr = {
7001                         .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7002                         .priority = MLX5_FLOW_PRIO_RSVD,
7003                         .ingress = 1,
7004                 };
7005                 struct rte_flow_item items[] = {
7006                         [0] = {
7007                                 .type = RTE_FLOW_ITEM_TYPE_END,
7008                         },
7009                 };
7010                 struct rte_flow_action actions[] = {
7011                         [0] = {
7012                                 .type = (enum rte_flow_action_type)
7013                                         MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7014                                 .conf = &(struct mlx5_flow_action_copy_mreg){
7015                                         .src = REG_C_1,
7016                                         .dst = idx,
7017                                 },
7018                         },
7019                         [1] = {
7020                                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
7021                                 .conf = &(struct rte_flow_action_jump){
7022                                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7023                                 },
7024                         },
7025                         [2] = {
7026                                 .type = RTE_FLOW_ACTION_TYPE_END,
7027                         },
7028                 };
7029                 uint32_t flow_idx;
7030                 struct rte_flow *flow;
7031                 struct rte_flow_error error;
7032
7033                 if (!config->dv_flow_en)
7034                         break;
7035                 /* Create internal flow, validation skips copy action. */
7036                 flow_idx = flow_list_create(dev, NULL, &attr, items,
7037                                             actions, false, &error);
7038                 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7039                                       flow_idx);
7040                 if (!flow)
7041                         continue;
7042                 config->flow_mreg_c[n++] = idx;
7043                 flow_list_destroy(dev, NULL, flow_idx);
7044         }
7045         for (; n < MLX5_MREG_C_NUM; ++n)
7046                 config->flow_mreg_c[n] = REG_NON;
7047         return 0;
7048 }
7049
7050 /**
7051  * Dump raw HW flow data to a file.
7052  *
7053  * @param[in] dev
7054  *   The pointer to the Ethernet device.
7055  * @param[in] file
7056  *   A pointer to a file for output.
7057  * @param[out] error
7058  *   Perform verbose error reporting if not NULL. PMDs initialize this
7059  *   structure in case of error only.
7060  * @return
7061  *   0 on success, a negative value otherwise.
7062  */
7063 int
7064 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
7065                    FILE *file,
7066                    struct rte_flow_error *error __rte_unused)
7067 {
7068         struct mlx5_priv *priv = dev->data->dev_private;
7069         struct mlx5_dev_ctx_shared *sh = priv->sh;
7070
7071         if (!priv->config.dv_flow_en) {
7072                 if (fputs("device dv flow disabled\n", file) <= 0)
7073                         return -errno;
7074                 return -ENOTSUP;
7075         }
7076         return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
7077                                        sh->tx_domain, file);
7078 }
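
/*
 * Editor's illustrative sketch, not part of the driver: triggering the dump
 * above through the public API. The helper name and file path are
 * hypothetical.
 */
static __rte_unused int
example_dump_flows(uint16_t port_id)
{
        struct rte_flow_error err;
        FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
        int ret;

        if (!f)
                return -errno;
        ret = rte_flow_dev_dump(port_id, f, &err);
        fclose(f);
        return ret;
}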
7079
7080 /**
7081  * Get aged-out flows.
7082  *
7083  * @param[in] dev
7084  *   Pointer to the Ethernet device structure.
7085  * @param[in] contexts
7086  *   The address of an array of pointers to the aged-out flow contexts.
7087  * @param[in] nb_contexts
7088  *   The length of the context array.
7089  * @param[out] error
7090  *   Perform verbose error reporting if not NULL. Initialized in case of
7091  *   error only.
7092  *
7093  * @return
7094  *   The number of aged contexts retrieved on success, a negative errno
7095  *   value otherwise. If nb_contexts is 0, the total number of aged
7096  *   contexts is returned; otherwise, the number of aged flows reported
7097  *   in the context array is returned.
7098  */
7099 int
7100 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
7101                         uint32_t nb_contexts, struct rte_flow_error *error)
7102 {
7103         const struct mlx5_flow_driver_ops *fops;
7104         struct rte_flow_attr attr = { .transfer = 0 };
7105
7106         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7107                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7108                 return fops->get_aged_flows(dev, contexts, nb_contexts,
7109                                                     error);
7110         }
7111         DRV_LOG(ERR,
7112                 "port %u get aged flows is not supported.",
7113                  dev->data->port_id);
7114         return -ENOTSUP;
7115 }
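
/*
 * Editor's illustrative sketch, not part of the driver: the two-step
 * pattern for draining aged-out flow contexts via the public API. The
 * helper name is hypothetical.
 */
static __rte_unused int
example_drain_aged_flows(uint16_t port_id)
{
        struct rte_flow_error err;
        void **contexts;
        int total, n;

        /* A first call with nb_contexts == 0 only returns the total count. */
        total = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
        if (total <= 0)
                return total;
        contexts = mlx5_malloc(MLX5_MEM_ZERO, sizeof(void *) * total, 0,
                               SOCKET_ID_ANY);
        if (!contexts)
                return -ENOMEM;
        n = rte_flow_get_aged_flows(port_id, contexts, total, &err);
        /* ... destroy or refresh the flows behind the returned contexts ... */
        mlx5_free(contexts);
        return n;
}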
7116
7117 /* Wrapper for driver action_validate op callback */
7118 static int
7119 flow_drv_action_validate(struct rte_eth_dev *dev,
7120                          const struct rte_flow_shared_action_conf *conf,
7121                          const struct rte_flow_action *action,
7122                          const struct mlx5_flow_driver_ops *fops,
7123                          struct rte_flow_error *error)
7124 {
7125         static const char err_msg[] = "shared action validation unsupported";
7126
7127         if (!fops->action_validate) {
7128                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7129                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7130                                    NULL, err_msg);
7131                 return -rte_errno;
7132         }
7133         return fops->action_validate(dev, conf, action, error);
7134 }
7135
7136 /**
7137  * Destroys the shared action by handle.
7138  *
7139  * @param dev
7140  *   Pointer to Ethernet device structure.
7141  * @param[in] action
7142  *   Handle for the shared action to be destroyed.
7143  * @param[out] error
7144  *   Perform verbose error reporting if not NULL. PMDs initialize this
7145  *   structure in case of error only.
7146  *
7147  * @return
7148  *   0 on success, a negative errno value otherwise and rte_errno is set.
7149  *
7150  * @note: wrapper for driver action_destroy op callback.
7151  */
7152 static int
7153 mlx5_shared_action_destroy(struct rte_eth_dev *dev,
7154                            struct rte_flow_shared_action *action,
7155                            struct rte_flow_error *error)
7156 {
7157         static const char err_msg[] = "shared action destruction unsupported";
7158         struct rte_flow_attr attr = { .transfer = 0 };
7159         const struct mlx5_flow_driver_ops *fops =
7160                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7161
7162         if (!fops->action_destroy) {
7163                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7164                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7165                                    NULL, err_msg);
7166                 return -rte_errno;
7167         }
7168         return fops->action_destroy(dev, action, error);
7169 }
7170
7171 /* Wrapper for driver action_update op callback */
7172 static int
7173 flow_drv_action_update(struct rte_eth_dev *dev,
7174                        struct rte_flow_shared_action *action,
7175                        const void *action_conf,
7176                        const struct mlx5_flow_driver_ops *fops,
7177                        struct rte_flow_error *error)
7178 {
7179         static const char err_msg[] = "shared action update unsupported";
7180
7181         if (!fops->action_update) {
7182                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7183                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7184                                    NULL, err_msg);
7185                 return -rte_errno;
7186         }
7187         return fops->action_update(dev, action, action_conf, error);
7188 }
7189
7190 /* Wrapper for driver action_query op callback */
7191 static int
7192 flow_drv_action_query(struct rte_eth_dev *dev,
7193                       const struct rte_flow_shared_action *action,
7194                       void *data,
7195                       const struct mlx5_flow_driver_ops *fops,
7196                       struct rte_flow_error *error)
7197 {
7198         static const char err_msg[] = "shared action query unsupported";
7199
7200         if (!fops->action_query) {
7201                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7202                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7203                                    NULL, err_msg);
7204                 return -rte_errno;
7205         }
7206         return fops->action_query(dev, action, data, error);
7207 }
7208
7209 /**
7210  * Create a shared action for reuse in multiple flow rules.
7211  *
7212  * @param dev
7213  *   Pointer to Ethernet device structure.
7214  * @param[in] action
7215  *   Action configuration for shared action creation.
7216  * @param[out] error
7217  *   Perform verbose error reporting if not NULL. PMDs initialize this
7218  *   structure in case of error only.
7219  * @return
7220  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
7221  */
7222 static struct rte_flow_shared_action *
7223 mlx5_shared_action_create(struct rte_eth_dev *dev,
7224                           const struct rte_flow_shared_action_conf *conf,
7225                           const struct rte_flow_action *action,
7226                           struct rte_flow_error *error)
7227 {
7228         static const char err_msg[] = "shared action creation unsupported";
7229         struct rte_flow_attr attr = { .transfer = 0 };
7230         const struct mlx5_flow_driver_ops *fops =
7231                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7232
7233         if (flow_drv_action_validate(dev, conf, action, fops, error))
7234                 return NULL;
7235         if (!fops->action_create) {
7236                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7237                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7238                                    NULL, err_msg);
7239                 return NULL;
7240         }
7241         return fops->action_create(dev, conf, action, error);
7242 }
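
/*
 * Editor's illustrative sketch, not part of the driver: creating a shared
 * RSS action (the variant backed by MLX5_IPOOL_RSS_SHARED_ACTIONS here),
 * which rules can then reference via RTE_FLOW_ACTION_TYPE_SHARED. The
 * helper name is hypothetical.
 */
static __rte_unused struct rte_flow_shared_action *
example_create_shared_rss(uint16_t port_id, const uint16_t *queues,
                          uint32_t nb_queues)
{
        struct rte_flow_shared_action_conf conf = {
                .ingress = 1,
        };
        struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .types = ETH_RSS_IP,
                .queue = queues,
                .queue_num = nb_queues,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };
        struct rte_flow_error err;

        return rte_flow_shared_action_create(port_id, &conf, &action, &err);
}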
7243
7244 /**
7245  * Updates in place the shared action configuration pointed to by the
7246  * *shared_action* handle, with the configuration provided as the *action*
7247  * argument. The update affects all flow rules reusing the action via the
7248  * handle.
7249  *
7250  * @param dev
7251  *   Pointer to Ethernet device structure.
7252  * @param[in] shared_action
7253  *   Handle for the shared action to be updated.
7254  * @param[in] action
7255  *   Action specification used to modify the action pointed by handle.
7256  *   *action* must be of the same type as the action pointed to by the
7257  *   *shared_action* handle argument; otherwise it is considered invalid.
7258  * @param[out] error
7259  *   Perform verbose error reporting if not NULL. PMDs initialize this
7260  *   structure in case of error only.
7261  *
7262  * @return
7263  *   0 on success, a negative errno value otherwise and rte_errno is set.
7264  */
7265 static int
7266 mlx5_shared_action_update(struct rte_eth_dev *dev,
7267                 struct rte_flow_shared_action *shared_action,
7268                 const struct rte_flow_action *action,
7269                 struct rte_flow_error *error)
7270 {
7271         struct rte_flow_attr attr = { .transfer = 0 };
7272         const struct mlx5_flow_driver_ops *fops =
7273                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7274         int ret;
7275
7276         ret = flow_drv_action_validate(dev, NULL, action, fops, error);
7277         if (ret)
7278                 return ret;
7279         return flow_drv_action_update(dev, shared_action, action->conf, fops,
7280                                       error);
7281 }
7282
7283 /**
7284  * Query the shared action by handle.
7285  *
7286  * This function allows retrieving action-specific data such as counters.
7287  * Data is gathered by a special action which may be present/referenced in
7288  * more than one flow rule definition.
7289  *
7290  * \see RTE_FLOW_ACTION_TYPE_COUNT
7291  *
7292  * @param dev
7293  *   Pointer to Ethernet device structure.
7294  * @param[in] action
7295  *   Handle for the shared action to query.
7296  * @param[in, out] data
7297  *   Pointer to storage for the associated query data type.
7298  * @param[out] error
7299  *   Perform verbose error reporting if not NULL. PMDs initialize this
7300  *   structure in case of error only.
7301  *
7302  * @return
7303  *   0 on success, a negative errno value otherwise and rte_errno is set.
7304  */
7305 static int
7306 mlx5_shared_action_query(struct rte_eth_dev *dev,
7307                          const struct rte_flow_shared_action *action,
7308                          void *data,
7309                          struct rte_flow_error *error)
7310 {
7311         struct rte_flow_attr attr = { .transfer = 0 };
7312         const struct mlx5_flow_driver_ops *fops =
7313                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7314
7315         return flow_drv_action_query(dev, action, data, fops, error);
7316 }
7317
7318 /**
7319  * Destroy all shared actions.
7320  *
7321  * @param dev
7322  *   Pointer to Ethernet device.
7323  *
7324  * @return
7325  *   0 on success, a negative errno value otherwise and rte_errno is set.
7326  */
7327 int
7328 mlx5_shared_action_flush(struct rte_eth_dev *dev)
7329 {
7330         struct rte_flow_error error;
7331         struct mlx5_priv *priv = dev->data->dev_private;
7332         struct mlx5_shared_action_rss *action;
7333         int ret = 0;
7334         uint32_t idx;
7335
7336         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
7337                       priv->rss_shared_actions, idx, action, next) {
7338                 ret |= mlx5_shared_action_destroy(dev,
7339                        (struct rte_flow_shared_action *)(uintptr_t)idx, &error);
7340         }
7341         return ret;
7342 }
7343
7344 #ifndef HAVE_MLX5DV_DR
7345 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
7346 #else
7347 #define MLX5_DOMAIN_SYNC_FLOW \
7348         (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
7349 #endif
7350
7351 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
7352 {
7353         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
7354         const struct mlx5_flow_driver_ops *fops;
7355         int ret;
7356         struct rte_flow_attr attr = { .transfer = 0 };
7357
7358         fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7359         ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
7360         if (ret > 0)
7361                 ret = -ret;
7362         return ret;
7363 }
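
/*
 * Editor's illustrative sketch, not part of the driver: flushing the
 * steering caches of all domains through the PMD-private API above, using
 * the domain bits from rte_pmd_mlx5.h. The helper name is hypothetical.
 */
static __rte_unused int
example_sync_all_domains(uint16_t port_id)
{
        return rte_pmd_mlx5_sync_flow(port_id,
                                      MLX5_DOMAIN_BIT_NIC_RX |
                                      MLX5_DOMAIN_BIT_NIC_TX |
                                      MLX5_DOMAIN_BIT_FDB);
}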
7364
7365 /**
7366  * Tunnel offload functionality is defined for the DV environment only.
7367  */
7368 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7369 __extension__
7370 union tunnel_offload_mark {
7371         uint32_t val;
7372         struct {
7373                 uint32_t app_reserve:8;
7374                 uint32_t table_id:15;
7375                 uint32_t transfer:1;
7376                 uint32_t _unused_:8;
7377         };
7378 };
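
/*
 * Editor's illustrative sketch, not part of the driver: how an application
 * obtains the PMD tunnel actions that eventually reach
 * flow_tunnel_add_default_miss() below, here for a VXLAN tunnel. The
 * helper name is hypothetical.
 */
static __rte_unused int
example_get_tunnel_decap_actions(uint16_t port_id,
                                 struct rte_flow_action **pmd_actions,
                                 uint32_t *nb_pmd_actions)
{
        struct rte_flow_tunnel tunnel = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        };
        struct rte_flow_error err;

        /* The PMD returns actions to prepend to the application rule. */
        return rte_flow_tunnel_decap_set(port_id, &tunnel, pmd_actions,
                                         nb_pmd_actions, &err);
}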
7379
7380 static bool
7381 mlx5_access_tunnel_offload_db
7382         (struct rte_eth_dev *dev,
7383          bool (*match)(struct rte_eth_dev *,
7384                        struct mlx5_flow_tunnel *, const void *),
7385          void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
7386          void (*miss)(struct rte_eth_dev *, void *),
7387          void *ctx, bool lock_op);
7388
7389 static int
7390 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
7391                              struct rte_flow *flow,
7392                              const struct rte_flow_attr *attr,
7393                              const struct rte_flow_action *app_actions,
7394                              uint32_t flow_idx,
7395                              struct tunnel_default_miss_ctx *ctx,
7396                              struct rte_flow_error *error)
7397 {
7398         struct mlx5_priv *priv = dev->data->dev_private;
7399         struct mlx5_flow *dev_flow;
7400         struct rte_flow_attr miss_attr = *attr;
7401         const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
7402         const struct rte_flow_item miss_items[2] = {
7403                 {
7404                         .type = RTE_FLOW_ITEM_TYPE_ETH,
7405                         .spec = NULL,
7406                         .last = NULL,
7407                         .mask = NULL
7408                 },
7409                 {
7410                         .type = RTE_FLOW_ITEM_TYPE_END,
7411                         .spec = NULL,
7412                         .last = NULL,
7413                         .mask = NULL
7414                 }
7415         };
7416         union tunnel_offload_mark mark_id;
7417         struct rte_flow_action_mark miss_mark;
7418         struct rte_flow_action miss_actions[3] = {
7419                 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
7420                 [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
7421         };
7422         const struct rte_flow_action_jump *jump_data;
7423         uint32_t i, flow_table = 0; /* prevent compilation warning */
7424         struct flow_grp_info grp_info = {
7425                 .external = 1,
7426                 .transfer = attr->transfer,
7427                 .fdb_def_rule = !!priv->fdb_def_rule,
7428                 .std_tbl_fix = 0,
7429         };
7430         int ret;
7431
7432         if (!attr->transfer) {
7433                 uint32_t q_size;
7434
7435                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
7436                 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
7437                 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
7438                                          0, SOCKET_ID_ANY);
7439                 if (!ctx->queue)
7440                         return rte_flow_error_set
7441                                 (error, ENOMEM,
7442                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7443                                 NULL, "invalid default miss RSS");
7444                 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
7445                 ctx->action_rss.level = 0;
7446                 ctx->action_rss.types = priv->rss_conf.rss_hf;
7447                 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
7448                 ctx->action_rss.queue_num = priv->reta_idx_n;
7449                 ctx->action_rss.key = priv->rss_conf.rss_key;
7450                 ctx->action_rss.queue = ctx->queue;
7451                 if (!priv->reta_idx_n || !priv->rxqs_n)
7452                         return rte_flow_error_set
7453                                 (error, EINVAL,
7454                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7455                                 NULL, "invalid port configuration");
7456                 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
7457                         ctx->action_rss.types = 0;
7458                 for (i = 0; i != priv->reta_idx_n; ++i)
7459                         ctx->queue[i] = (*priv->reta_idx)[i];
7460         } else {
7461                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
7462                 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
7463         }
7464         miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
7465         for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
7466         jump_data = app_actions->conf;
7467         miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
7468         miss_attr.group = jump_data->group;
7469         ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
7470                                        &flow_table, &grp_info, error);
7471         if (ret)
7472                 return rte_flow_error_set(error, EINVAL,
7473                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7474                                           NULL, "invalid tunnel id");
7475         mark_id.app_reserve = 0;
7476         mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
7477         mark_id.transfer = !!attr->transfer;
7478         mark_id._unused_ = 0;
7479         miss_mark.id = mark_id.val;
7480         dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
7481                                     miss_items, miss_actions, flow_idx, error);
7482         if (!dev_flow)
7483                 return -rte_errno;
7484         dev_flow->flow = flow;
7485         dev_flow->external = true;
7486         dev_flow->tunnel = tunnel;
7487         /* Subflow object was created, we must include it in the list. */
7488         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
7489                       dev_flow->handle, next);
7490         DRV_LOG(DEBUG,
7491                 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
7492                 dev->data->port_id, tunnel->app_tunnel.type,
7493                 tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
7494         ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
7495                                   miss_actions, error);
7496         if (!ret)
7497                 ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
7498                                                   error);
7499
7500         return ret;
7501 }
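
/*
 * Editor's note (derived from the code above): the default miss rule built
 * here matches any Ethernet packet and carries two actions - a MARK with
 * the encoded tunnel table ID, followed by RSS over the configured queues
 * for NIC RX rules, or a JUMP to MLX5_TNL_MISS_FDB_JUMP_GRP for transfer
 * rules.
 */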
7502
7503 static const struct mlx5_flow_tbl_data_entry  *
7504 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
7505 {
7506         struct mlx5_priv *priv = dev->data->dev_private;
7507         struct mlx5_dev_ctx_shared *sh = priv->sh;
7508         struct mlx5_hlist_entry *he;
7509         union tunnel_offload_mark mbits = { .val = mark };
7510         union mlx5_flow_tbl_key table_key = {
7511                 {
7512                         .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
7513                         .dummy = 0,
7514                         .domain = !!mbits.transfer,
7515                         .direction = 0,
7516                 }
7517         };
7518         he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
7519         return he ?
7520                container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
7521 }
7522
7523 static void
7524 mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
7525                                    struct mlx5_hlist_entry *entry)
7526 {
7527         struct mlx5_dev_ctx_shared *sh = list->ctx;
7528         struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
7529
7530         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7531                         tunnel_flow_tbl_to_id(tte->flow_table));
7532         mlx5_free(tte);
7533 }
7534
7535 static int
7536 mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
7537                                   struct mlx5_hlist_entry *entry,
7538                                   uint64_t key, void *cb_ctx __rte_unused)
7539 {
7540         union tunnel_tbl_key tbl = {
7541                 .val = key,
7542         };
7543         struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
7544
7545         return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
7546 }
7547
7548 static struct mlx5_hlist_entry *
7549 mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
7550                                    void *ctx __rte_unused)
7551 {
7552         struct mlx5_dev_ctx_shared *sh = list->ctx;
7553         struct tunnel_tbl_entry *tte;
7554         union tunnel_tbl_key tbl = {
7555                 .val = key,
7556         };
7557
7558         tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
7559                           sizeof(*tte), 0,
7560                           SOCKET_ID_ANY);
7561         if (!tte)
7562                 goto err;
7563         mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7564                           &tte->flow_table);
7565         if (tte->flow_table >= MLX5_MAX_TABLES) {
7566                 DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the max limit.",
7567                         tte->flow_table);
7568                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7569                                 tte->flow_table);
7570                 goto err;
7571         } else if (!tte->flow_table) {
7572                 goto err;
7573         }
7574         tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
7575         tte->tunnel_id = tbl.tunnel_id;
7576         tte->group = tbl.group;
7577         return &tte->hash;
7578 err:
7579         if (tte)
7580                 mlx5_free(tte);
7581         return NULL;
7582 }
7583
7584 static uint32_t
7585 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
7586                                 const struct mlx5_flow_tunnel *tunnel,
7587                                 uint32_t group, uint32_t *table,
7588                                 struct rte_flow_error *error)
7589 {
7590         struct mlx5_hlist_entry *he;
7591         struct tunnel_tbl_entry *tte;
7592         union tunnel_tbl_key key = {
7593                 .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
7594                 .group = group
7595         };
7596         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7597         struct mlx5_hlist *group_hash;
7598
7599         group_hash = tunnel ? tunnel->groups : thub->groups;
7600         he = mlx5_hlist_register(group_hash, key.val, NULL);
7601         if (!he)
7602                 return rte_flow_error_set(error, EINVAL,
7603                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7604                                           NULL,
7605                                           "tunnel group index not supported");
7606         tte = container_of(he, typeof(*tte), hash);
7607         *table = tte->flow_table;
7608         DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
7609                 dev->data->port_id, key.tunnel_id, group, *table);
7610         return 0;
7611 }
7612
7613 static void
7614 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
7615                       struct mlx5_flow_tunnel *tunnel)
7616 {
7617         struct mlx5_priv *priv = dev->data->dev_private;
7618         struct mlx5_indexed_pool *ipool;
7619
7620         DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
7621                 dev->data->port_id, tunnel->tunnel_id);
7622         LIST_REMOVE(tunnel, chain);
7623         mlx5_hlist_destroy(tunnel->groups);
7624         ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
7625         mlx5_ipool_free(ipool, tunnel->tunnel_id);
7626 }
7627
7628 static bool
7629 mlx5_access_tunnel_offload_db
7630         (struct rte_eth_dev *dev,
7631          bool (*match)(struct rte_eth_dev *,
7632                        struct mlx5_flow_tunnel *, const void *),
7633          void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
7634          void (*miss)(struct rte_eth_dev *, void *),
7635          void *ctx, bool lock_op)
7636 {
7637         bool verdict = false;
7638         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7639         struct mlx5_flow_tunnel *tunnel;
7640
7641         rte_spinlock_lock(&thub->sl);
7642         LIST_FOREACH(tunnel, &thub->tunnels, chain) {
7643                 verdict = match(dev, tunnel, (const void *)ctx);
7644                 if (verdict)
7645                         break;
7646         }
7647         if (!lock_op)
7648                 rte_spinlock_unlock(&thub->sl);
7649         if (verdict && hit)
7650                 hit(dev, tunnel, ctx);
7651         if (!verdict && miss)
7652                 miss(dev, ctx);
7653         if (lock_op)
7654                 rte_spinlock_unlock(&thub->sl);
7655
7656         return verdict;
7657 }
7658
7659 struct tunnel_db_find_tunnel_id_ctx {
7660         uint32_t tunnel_id;
7661         struct mlx5_flow_tunnel *tunnel;
7662 };
7663
7664 static bool
7665 find_tunnel_id_match(struct rte_eth_dev *dev,
7666                      struct mlx5_flow_tunnel *tunnel, const void *x)
7667 {
7668         const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
7669
7670         RTE_SET_USED(dev);
7671         return tunnel->tunnel_id == ctx->tunnel_id;
7672 }
7673
7674 static void
7675 find_tunnel_id_hit(struct rte_eth_dev *dev,
7676                    struct mlx5_flow_tunnel *tunnel, void *x)
7677 {
7678         struct tunnel_db_find_tunnel_id_ctx *ctx = x;
7679         RTE_SET_USED(dev);
7680         ctx->tunnel = tunnel;
7681 }
7682
7683 static struct mlx5_flow_tunnel *
7684 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
7685 {
7686         struct tunnel_db_find_tunnel_id_ctx ctx = {
7687                 .tunnel_id = id,
7688         };
7689
7690         mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
7691                                       find_tunnel_id_hit, NULL, &ctx, true);
7692
7693         return ctx.tunnel;
7694 }
7695
7696 static struct mlx5_flow_tunnel *
7697 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
7698                           const struct rte_flow_tunnel *app_tunnel)
7699 {
7700         struct mlx5_priv *priv = dev->data->dev_private;
7701         struct mlx5_indexed_pool *ipool;
7702         struct mlx5_flow_tunnel *tunnel;
7703         uint32_t id;
7704
7705         ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
7706         tunnel = mlx5_ipool_zmalloc(ipool, &id);
7707         if (!tunnel)
7708                 return NULL;
7709         if (id >= MLX5_MAX_TUNNELS) {
7710                 mlx5_ipool_free(ipool, id);
7711                 DRV_LOG(ERR, "Tunnel ID %d exceeds the max limit.", id);
7712                 return NULL;
7713         }
7714         tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
7715                                            mlx5_flow_tunnel_grp2tbl_create_cb,
7716                                            mlx5_flow_tunnel_grp2tbl_match_cb,
7717                                            mlx5_flow_tunnel_grp2tbl_remove_cb);
7718         if (!tunnel->groups) {
7719                 mlx5_ipool_free(ipool, id);
7720                 return NULL;
7721         }
7722         tunnel->groups->ctx = priv->sh;
7723         /* initiate new PMD tunnel */
7724         memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
7725         tunnel->tunnel_id = id;
7726         tunnel->action.type = (typeof(tunnel->action.type))
7727                               MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
7728         tunnel->action.conf = tunnel;
7729         tunnel->item.type = (typeof(tunnel->item.type))
7730                             MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
7731         tunnel->item.spec = tunnel;
7732         tunnel->item.last = NULL;
7733         tunnel->item.mask = NULL;
7734
7735         DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
7736                 dev->data->port_id, tunnel->tunnel_id);
7737
7738         return tunnel;
7739 }
7740
struct tunnel_db_get_tunnel_ctx {
	const struct rte_flow_tunnel *app_tunnel;
	struct mlx5_flow_tunnel *tunnel;
};

static bool get_tunnel_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
		       sizeof(*ctx->app_tunnel));
}

static void get_tunnel_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	/* Called under the tunnel spinlock protection. */
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	RTE_SET_USED(dev);
	/* The release path decrements outside the lock, so be atomic here. */
	__atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
	ctx->tunnel = tunnel;
}

static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
{
	/* Called under the tunnel spinlock protection. */
	struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
	struct tunnel_db_get_tunnel_ctx *ctx = x;

	/* Drop the lock around the allocation, re-take it to insert. */
	rte_spinlock_unlock(&thub->sl);
	ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
	rte_spinlock_lock(&thub->sl);
	if (ctx->tunnel) {
		ctx->tunnel->refctn = 1;
		LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
	}
}

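/**
 * Get the tunnel offload object for the given application tunnel,
 * creating it on first use.
 *
 * The object is reference-counted: an existing match bumps the
 * reference, a miss allocates a new tunnel with a reference of 1.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */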
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
		     const struct rte_flow_tunnel *app_tunnel,
		     struct mlx5_flow_tunnel **tunnel)
{
	struct tunnel_db_get_tunnel_ctx ctx = {
		.app_tunnel = app_tunnel,
	};

	mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
				      get_tunnel_miss, &ctx, true);
	*tunnel = ctx.tunnel;
	return ctx.tunnel ? 0 : -ENOMEM;
}

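/**
 * Release the tunnel hub attached to a shared device context.
 */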
void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
{
	struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;

	if (!thub)
		return;
	if (!LIST_EMPTY(&thub->tunnels))
		DRV_LOG(WARNING, "port %u tunnels present", port_id);
	mlx5_hlist_destroy(thub->groups);
	mlx5_free(thub);
}

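/**
 * Allocate and initialize the tunnel hub of a shared device context.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */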
int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
{
	int err;
	struct mlx5_flow_tunnel_hub *thub;

	thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
			   0, SOCKET_ID_ANY);
	if (!thub)
		return -ENOMEM;
	LIST_INIT(&thub->tunnels);
	rte_spinlock_init(&thub->sl);
	thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
					 0, mlx5_flow_tunnel_grp2tbl_create_cb,
					 mlx5_flow_tunnel_grp2tbl_match_cb,
					 mlx5_flow_tunnel_grp2tbl_remove_cb);
	if (!thub->groups) {
		err = -rte_errno;
		goto err;
	}
	thub->groups->ctx = sh;
	sh->tunnel_hub = thub;

	return 0;

err:
	/* The only failure path: thub is valid but thub->groups is not. */
	mlx5_free(thub);
	return err;
}

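/**
 * Check that tunnel offload can be applied to the application tunnel.
 *
 * @param[out] err_msg
 *   Set to a static string describing the failure, NULL on success.
 *
 * @return
 *   true if the tunnel is valid, false otherwise.
 */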
static inline bool
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
			  struct rte_flow_tunnel *tunnel,
			  const char **err_msg)
{
	*err_msg = NULL;
	if (!is_tunnel_offload_active(dev)) {
		*err_msg = "tunnel offload was not activated";
		goto out;
	} else if (!tunnel) {
		*err_msg = "no application tunnel";
		goto out;
	}

	switch (tunnel->type) {
	default:
		*err_msg = "unsupported tunnel type";
		goto out;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		break;
	}

out:
	return !*err_msg;
}

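/**
 * PMD callback for RTE_FLOW tunnel decap_set: return the PMD-private
 * action that applications must prepend to the tunnel flow rule.
 */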
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
			   struct rte_flow_tunnel *app_tunnel,
			   struct rte_flow_action **actions,
			   uint32_t *num_of_actions,
			   struct rte_flow_error *error)
{
	int ret;
	struct mlx5_flow_tunnel *tunnel;
	const char *err_msg = NULL;
	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

	if (!verdict)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  err_msg);
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
					  "failed to initialize pmd tunnel");
	}
	*actions = &tunnel->action;
	*num_of_actions = 1;
	return 0;
}

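/**
 * PMD callback for RTE_FLOW tunnel match: return the PMD-private
 * pattern item that applications must prepend to tunnel match rules.
 */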
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
		       struct rte_flow_tunnel *app_tunnel,
		       struct rte_flow_item **items,
		       uint32_t *num_of_items,
		       struct rte_flow_error *error)
{
	int ret;
	struct mlx5_flow_tunnel *tunnel;
	const char *err_msg = NULL;
	bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

	if (!verdict)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  err_msg);
	ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
	if (ret < 0) {
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
					  "failed to initialize pmd tunnel");
	}
	*items = &tunnel->item;
	*num_of_items = 1;
	return 0;
}

struct tunnel_db_element_release_ctx {
	struct rte_flow_item *items;
	struct rte_flow_action *actions;
	uint32_t num_elements;
	struct rte_flow_error *error;
	int ret;
};

static bool
tunnel_element_release_match(struct rte_eth_dev *dev,
			     struct mlx5_flow_tunnel *tunnel, const void *x)
{
	const struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	if (ctx->num_elements != 1)
		return false;
	else if (ctx->items)
		return ctx->items == &tunnel->item;
	else if (ctx->actions)
		return ctx->actions == &tunnel->action;

	return false;
}

static void
tunnel_element_release_hit(struct rte_eth_dev *dev,
			   struct mlx5_flow_tunnel *tunnel, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	ctx->ret = 0;
	if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
		mlx5_flow_tunnel_free(dev, tunnel);
}

static void
tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
{
	struct tunnel_db_element_release_ctx *ctx = x;

	RTE_SET_USED(dev);
	ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
				      RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				      "invalid argument");
}

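/**
 * PMD callback to release the pattern item obtained from tunnel match.
 */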
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = pmd_items,
		.actions = NULL,
		.num_elements = num_items,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

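/**
 * PMD callback to release the action obtained from tunnel decap_set.
 */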
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = NULL,
		.actions = pmd_actions,
		.num_elements = num_actions,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

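/**
 * PMD callback to restore tunnel information from a missed packet.
 *
 * Decodes the tunnel mark carried in the mbuf FDIR ID back into the
 * application tunnel and the group where the flow missed.
 */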
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

	if (!is_tunnel_offload_active(dev)) {
		info->flags = 0;
		return 0;
	}

	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}

#else /* HAVE_IBV_FLOW_DV_SUPPORT */
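/*
 * Stubs used when the PMD is built without Direct Verbs flow support:
 * tunnel offload is not available, so all entry points fail gracefully.
 */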
static int
mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
			   __rte_unused struct rte_flow_tunnel *app_tunnel,
			   __rte_unused struct rte_flow_action **actions,
			   __rte_unused uint32_t *num_of_actions,
			   __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
		       __rte_unused struct rte_flow_tunnel *app_tunnel,
		       __rte_unused struct rte_flow_item **items,
		       __rte_unused uint32_t *num_of_items,
		       __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
			      __rte_unused struct rte_flow_item *pmd_items,
			      __rte_unused uint32_t num_items,
			      __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
				__rte_unused struct rte_flow_action *pmd_action,
				__rte_unused uint32_t num_actions,
				__rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
				  __rte_unused struct rte_mbuf *m,
				  __rte_unused struct rte_flow_restore_info *i,
				  __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
			     __rte_unused struct rte_flow *flow,
			     __rte_unused const struct rte_flow_attr *attr,
			     __rte_unused const struct rte_flow_action *actions,
			     __rte_unused uint32_t flow_idx,
			     __rte_unused struct tunnel_default_miss_ctx *ctx,
			     __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused uint32_t id)
{
	return NULL;
}

static void
mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
		      __rte_unused struct mlx5_flow_tunnel *tunnel)
{
}

static uint32_t
tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
				__rte_unused const struct mlx5_flow_tunnel *t,
				__rte_unused uint32_t group,
				__rte_unused uint32_t *table,
				struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "tunnel offload requires DV support");
}

void
mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
			__rte_unused uint16_t port_id)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */
