/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

struct tunnel_default_miss_ctx {
        uint16_t *queue;
        __extension__
        union {
                struct rte_flow_action_rss action_rss;
                struct rte_flow_action_queue miss_queue;
                struct rte_flow_action_jump miss_jump;
                uint8_t raw[0];
        };
};

static int
flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
                             struct rte_flow *flow,
                             const struct rte_flow_attr *attr,
                             const struct rte_flow_action *app_actions,
                             uint32_t flow_idx,
                             struct tunnel_default_miss_ctx *ctx,
                             struct rte_flow_error *error);
static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static uint32_t
tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
                                const struct mlx5_flow_tunnel *tunnel,
                                uint32_t group, uint32_t *table,
                                struct rte_flow_error *error);

static struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
static void mlx5_flow_pop_thread_workspace(void);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
        (const int []){ \
                __VA_ARGS__, 0, \
        }

/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
        const int *const next;
        /**<
         * List of next node indexes. Index 0 is interpreted as a terminator.
         */
        const enum rte_flow_item_type type;
        /**< Pattern item type of current node. */
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
         * (see ETH_RSS_* definitions).
         */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
        uint32_t entries;
        /**< Number of entries @p patterns and @p priorities. */
        struct {
                struct rte_flow_item *pattern; /**< Expanded pattern array. */
                uint32_t priority; /**< Priority offset for each expansion. */
        } entry[];
};
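
/*
 * Memory layout note (illustrative): the expansion result is one linear
 * buffer,
 *
 *   | entries | entry[0..elt_n-1] | user pattern | expanded pattern 1 | ... |
 *
 * where elt_n is 8 in mlx5_flow_expand_rss(), entry[0].pattern always points
 * at the verbatim copy of the user pattern, and the following entries point
 * at the expanded copies appended behind it.
 */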

static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
        enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
        uint16_t ether_type = 0;
        uint16_t ether_type_m;
        uint8_t ip_next_proto = 0;
        uint8_t ip_next_proto_m;

        if (item == NULL || item->spec == NULL)
                return ret;
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_eth *)
                                                (item->mask))->type;
                else
                        ether_type_m = rte_flow_item_eth_mask.type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_eth *)
                                (item->spec))->type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_vlan *)
                                                (item->mask))->inner_type;
                else
                        ether_type_m = rte_flow_item_vlan_mask.inner_type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_vlan *)
                                (item->spec))->inner_type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
                                        (item->mask))->hdr.next_proto_id;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv4_mask.hdr.next_proto_id;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv4 *)
                                (item->spec))->hdr.next_proto_id;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
                                                (item->mask))->hdr.proto;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv6_mask.hdr.proto;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv6 *)
                                (item->spec))->hdr.proto;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
        }
        return ret;
}
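
/*
 * Worked example (illustrative only, not driver code): an ETH item whose
 * spec and mask both cover the full type field completes to the matching
 * L3 item type.
 *
 * @code
 * struct rte_flow_item_eth spec = { .type = RTE_BE16(RTE_ETHER_TYPE_IPV4) };
 * struct rte_flow_item_eth mask = { .type = RTE_BE16(0xffff) };
 * struct rte_flow_item item = {
 *         .type = RTE_FLOW_ITEM_TYPE_ETH,
 *         .spec = &spec,
 *         .mask = &mask,
 * };
 *
 * enum rte_flow_item_type next = mlx5_flow_expand_rss_item_complete(&item);
 * @endcode
 *
 * Here next == RTE_FLOW_ITEM_TYPE_IPV4, so the expansion may continue with
 * IPv4 sub-patterns. Note the explicit mask: the default
 * rte_flow_item_eth_mask does not cover the type field.
 */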

/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success, a negative errno value otherwise and rte_errno is
 *   set, the following errors are defined:
 *
 *   -E2BIG: graph-depth @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                     const struct rte_flow_item *pattern, uint64_t types,
                     const struct mlx5_flow_expand_node graph[],
                     int graph_root_index)
{
        const int elt_n = 8;
        const struct rte_flow_item *item;
        const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
        const int *stack[elt_n];
        int stack_pos = 0;
        struct rte_flow_item flow_items[elt_n];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
        const struct mlx5_flow_expand_node *next = NULL;
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
        const struct rte_flow_item *last_item = NULL;

        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
                elt_n * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
                buf->entry[0].pattern = (void *)&buf->entry[elt_n];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
                        last_item = item;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
                }
                if (next)
                        node = next;
                user_pattern_size += sizeof(*item);
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
        /* Copy the user pattern in the first entry of the buffer. */
        if (lsize <= size) {
                rte_memcpy(addr, pattern, user_pattern_size);
                addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                buf->entries = 1;
        }
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
        /*
         * If the last valid item has its spec set, check whether the pattern
         * needs to be completed so that it can be used for expansion.
         */
        missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
        }
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
                        next = NULL;
                }
        }
        if (next && missed) {
                elt = 2; /* missed item + item end. */
                node = next;
                lsize += elt * sizeof(*item) + user_pattern_size;
                if ((node->rss_types & types) && lsize <= size) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
                        rte_memcpy(addr, buf->entry[0].pattern,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
        }
        memset(flow_items, 0, sizeof(flow_items));
        next_node = node->next;
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
                        /*
                         * Compute the number of items to copy from the
                         * expansion and copy them.
                         * When stack_pos is 0, there is one element in the
                         * stack, plus the additional END item.
                         */
                        elt = stack_pos + 2;
                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
                        if (lsize <= size) {
                                size_t n = elt * sizeof(*item);

                                buf->entry[buf->entries].priority =
                                        stack_pos + 1 + missed;
                                buf->entry[buf->entries].pattern = addr;
                                buf->entries++;
                                rte_memcpy(addr, buf->entry[0].pattern,
                                           user_pattern_size);
                                addr = (void *)(((uintptr_t)addr) +
                                                user_pattern_size);
                                rte_memcpy(addr, &missed_item,
                                           missed * sizeof(*item));
                                addr = (void *)(((uintptr_t)addr) +
                                        missed * sizeof(*item));
                                rte_memcpy(addr, flow_items, n);
                                addr = (void *)(((uintptr_t)addr) + n);
                        }
                }
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
                        if (stack_pos++ == elt_n - 2) {
                                /*
                                 * Leave headroom for the terminating END item
                                 * in flow_items[] and for this push in
                                 * stack[]; the original "== elt_n" check let
                                 * both arrays overflow by one entry before
                                 * the error triggered.
                                 */
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
                        ++next_node;
                } else {
                        /* Move to the next path. */
                        if (stack_pos)
                                next_node = stack[--stack_pos];
                        next_node++;
                        stack[stack_pos] = next_node;
                }
                node = *next_node ? &graph[*next_node] : NULL;
        }
        /* No expanded flows, but the missed item needs a dedicated rule. */
        if (buf->entries == 1 && missed != 0) {
                elt = 2;
                lsize += elt * sizeof(*item) + user_pattern_size;
                if (lsize <= size) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
                        flow_items[0].type = missed_item.type;
                        flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                        rte_memcpy(addr, buf->entry[0].pattern,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
                }
        }
        return lsize;
}
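
/*
 * A minimal usage sketch (hypothetical caller, not driver code): expand a
 * user pattern into a stack buffer, assuming 4 KB is enough for the result;
 * "pattern" is the caller's item array and create_one_flow() is an assumed
 * helper, not a driver function.
 *
 * @code
 * union {
 *         struct mlx5_flow_expand_rss buf;
 *         uint8_t buffer[4096];
 * } expand;
 * int ret;
 * uint32_t i;
 *
 * ret = mlx5_flow_expand_rss(&expand.buf, sizeof(expand.buffer), pattern,
 *                            ETH_RSS_IP | ETH_RSS_UDP,
 *                            mlx5_support_expansion, MLX5_EXPANSION_ROOT);
 * if (ret < 0)
 *         return ret;
 * MLX5_ASSERT((size_t)ret <= sizeof(expand.buffer));
 * for (i = 0; i < expand.buf.entries; ++i)
 *         create_one_flow(expand.buf.entry[i].pattern,
 *                         expand.buf.entry[i].priority);
 * @endcode
 */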

enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
        MLX5_EXPANSION_ROOT_ETH_VLAN,
        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
        MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
        MLX5_EXPANSION_OUTER_IPV4_TCP,
        MLX5_EXPANSION_OUTER_IPV6,
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
        MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
        MLX5_EXPANSION_IPV4_TCP,
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
                                                  MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                                                (MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6,
                                                  MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
};
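
/*
 * For illustration: expanding the pattern "eth / end" from
 * MLX5_EXPANSION_ROOT with types = ETH_RSS_IP | ETH_RSS_UDP walks the graph
 * above and yields entries equivalent to:
 *
 *   eth / end                  (user pattern, priority offset 0)
 *   eth / ipv4 / end           (priority offset 1)
 *   eth / ipv4 / udp / end     (priority offset 2)
 *   eth / ipv6 / end           (priority offset 1)
 *   eth / ipv6 / udp / end     (priority offset 2)
 *
 * The TCP branches are skipped because ETH_RSS_NONFRAG_IPV4_TCP and
 * ETH_RSS_NONFRAG_IPV6_TCP are not requested; deeper expansions carry a
 * larger priority offset.
 */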

static struct rte_flow_shared_action *
mlx5_shared_action_create(struct rte_eth_dev *dev,
                          const struct rte_flow_shared_action_conf *conf,
                          const struct rte_flow_action *action,
                          struct rte_flow_error *error);
static int mlx5_shared_action_destroy
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_shared_action *shared_action,
                                 struct rte_flow_error *error);
static int mlx5_shared_action_update
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_shared_action *shared_action,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error);
static int mlx5_shared_action_query
                                (struct rte_eth_dev *dev,
                                 const struct rte_flow_shared_action *action,
                                 void *data,
                                 struct rte_flow_error *error);
static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    struct rte_flow_tunnel *app_tunnel,
                    struct rte_flow_action **actions,
                    uint32_t *num_of_actions,
                    struct rte_flow_error *error);
static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       struct rte_flow_tunnel *app_tunnel,
                       struct rte_flow_item **items,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error);
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
                              struct rte_flow_item *pmd_items,
                              uint32_t num_items, struct rte_flow_error *err);
static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
                                struct rte_flow_action *pmd_actions,
                                uint32_t num_actions,
                                struct rte_flow_error *err);
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err);

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
        .query = mlx5_flow_query,
        .dev_dump = mlx5_flow_dev_dump,
        .get_aged_flows = mlx5_flow_get_aged_flows,
        .shared_action_create = mlx5_shared_action_create,
        .shared_action_destroy = mlx5_shared_action_destroy,
        .shared_action_update = mlx5_shared_action_update,
        .shared_action_query = mlx5_shared_action_query,
        .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
        .tunnel_match = mlx5_flow_tunnel_match,
        .tunnel_action_decap_release = mlx5_flow_tunnel_action_release,
        .tunnel_item_release = mlx5_flow_tunnel_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
        uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GENEVE,
                .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GRE,
                .ptype = RTE_PTYPE_TUNNEL_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_NVGRE,
                .ptype = RTE_PTYPE_TUNNEL_NVGRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPIP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GTP,
                .ptype = RTE_PTYPE_TUNNEL_GTPU,
        },
};

/* Key of thread specific flow workspace data. */
static pthread_key_t key_workspace;

/* One-time initialization control for the flow workspace pthread key. */
static pthread_once_t key_workspace_init;


/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     enum mlx5_feature_name feature,
                     uint32_t id,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;

        switch (feature) {
        case MLX5_HAIRPIN_RX:
                return REG_B;
        case MLX5_HAIRPIN_TX:
                return REG_A;
        case MLX5_METADATA_RX:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_B;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_METADATA_TX:
                return REG_A;
        case MLX5_METADATA_FDB:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_FLOW_MARK:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_1;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_0;
                }
                break;
        case MLX5_MTR_SFX:
                /*
                 * If meter color and flow match share one register, flow match
                 * should use the meter color register for match.
                 */
                if (priv->mtr_reg_share)
                        return priv->mtr_color_reg;
                else
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
        case MLX5_ASO_FLOW_HIT: /* Both features use the same REG_C. */
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
                 * The metadata COPY_MARK register is only used in the meter
                 * suffix sub-flow when a meter is present, so it is safe to
                 * share the same register.
                 */
                return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
        case MLX5_APP_TAG:
                /*
                 * If a meter is enabled, it engages a register for color
                 * match and flow match. If the meter color match does not
                 * use REG_C_2, the REG_C_x used by the meter color match
                 * must be skipped.
                 * If no meter is enabled, all available registers are free
                 * to use.
                 */
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
                if (id > (REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
                if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                /*
                 * This case means the meter is using a REG_C_x greater than
                 * REG_C_2. Take care not to conflict with the meter color
                 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
                 * the color register.
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
                        if (config->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
                                return config->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
}
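
/*
 * Illustrative usage (assumed caller context): query the register backing
 * MARK matching; on failure the callee sets rte_errno and fills the error
 * structure.
 *
 * @code
 * struct rte_flow_error error;
 * int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &error);
 *
 * if (reg < 0)
 *         return reg;
 * @endcode
 */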

/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;

        /*
         * Having available reg_c can be regarded as supporting extensive
         * flow metadata registers, which means:
         * - metadata register copy action by modify header.
         * - 16 modify header actions are supported.
         * - reg_c's are preserved across different domains (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
        return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
                          bool range_accepted,
                          struct rte_flow_error *error)
{
        unsigned int i;

        MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "mask enables non supported"
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last && !range_accepted) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
                int ret;

                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
                        last[i] = ((const uint8_t *)item->last)[i] & mask[i];
                }
                ret = memcmp(spec, last, size);
                if (ret != 0)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "range is not valid");
        }
        return 0;
}
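
/*
 * A minimal validation sketch (illustrative; "item", "mask" and "error" are
 * assumed caller state, "mask" being the effective bit-mask, i.e. item->mask
 * or the default one): check a VLAN item against a NIC that only supports
 * matching on the VID bits.
 *
 * @code
 * const struct rte_flow_item_vlan nic_mask = {
 *         .tci = RTE_BE16(0x0fff),
 * };
 * int ret = mlx5_flow_item_acceptable(item, mask,
 *                                     (const uint8_t *)&nic_mask,
 *                                     sizeof(nic_mask), false, error);
 *
 * if (ret < 0)
 *         return ret;
 * @endcode
 */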

/**
 * Adjust the hash fields according to the @p rss_desc information.
 *
 * @param[in] rss_desc
 *   Pointer to the RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        int rss_request_inner = rss_desc->level >= 2;

        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
                hash_fields |= IBV_RX_HASH_INNER;
        else if (tunnel || rss_request_inner)
                return 0;
#endif
        /* Check if requested layer matches RSS hash fields. */
        if (!(rss_desc->types & layer_types))
                return 0;
        return hash_fields;
}
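
/*
 * For example (illustrative): with rss_desc->level == 2 and rss_desc->types
 * containing ETH_RSS_NONFRAG_IPV4_UDP, a tunnel sub-flow requesting UDP port
 * hashing gets IBV_RX_HASH_INNER OR-ed into the returned fields, while the
 * same request for the outer part (tunnel == 0) returns 0 and contributes
 * nothing to the hash.
 */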

/**
 * Look up and set the ptype in the Rx queue data. Only a single ptype can be
 * used; if several tunnel rules are used on this queue, the tunnel ptype is
 * cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        unsigned int i;
        uint32_t tunnel_ptype = 0;

        /* Look up for the ptype to use. */
        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
                if (!rxq_ctrl->flow_tunnels_n[i])
                        continue;
                if (!tunnel_ptype) {
                        tunnel_ptype = tunnels_info[i].ptype;
                } else {
                        tunnel_ptype = 0;
                        break;
                }
        }
        rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                /*
                 * To support metadata register copy on Tx loopback, this must
                 * always be enabled (metadata may arrive from another port,
                 * not only from local flows).
                 */
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n++;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->flow_mark_n--;
                        rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl;
                unsigned int j;

                if (!(*priv->rxqs)[i])
                        continue;
                rxq_ctrl = container_of((*priv->rxqs)[i],
                                        struct mlx5_rxq_ctrl, rxq);
                rxq_ctrl->flow_mark_n = 0;
                rxq_ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq_ctrl->flow_tunnels_n[j] = 0;
                rxq_ctrl->rxq.tunnel = 0;
        }
}

/**
 * Set the Rx queue dynamic metadata (mask and offset) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 */
void
mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_rxq_data *data;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                if (!(*priv->rxqs)[i])
                        continue;
                data = (*priv->rxqs)[i];
                if (!rte_flow_dynf_metadata_avail()) {
                        data->dynf_meta = 0;
                        data->flow_meta_mask = 0;
                        data->flow_meta_offset = -1;
                } else {
                        data->dynf_meta = 1;
                        data->flow_meta_mask = rte_flow_dynf_metadata_mask;
                        data->flow_meta_offset = rte_flow_dynf_metadata_offs;
                }
        }
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
                      enum rte_flow_action_type action)
{
        if (actions == NULL)
                return NULL;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                if (actions->type == action)
                        return actions;
        return NULL;
}
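
/*
 * Illustrative usage (assumed caller context): locate the RSS action in an
 * action list, if present.
 *
 * @code
 * const struct rte_flow_action *rss_action =
 *         mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);
 * const struct rte_flow_action_rss *rss_conf =
 *         rss_action ? rss_action->conf : NULL;
 * @endcode
 */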

/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't mark and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 flag"
                                          " actions in same flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "flag action not supported for "
                                          "egress");
        return 0;
}
1263
1264 /*
1265  * Validate the mark action.
1266  *
1267  * @param[in] action
1268  *   Pointer to the queue action.
1269  * @param[in] action_flags
1270  *   Bit-fields that holds the actions detected until now.
1271  * @param[in] attr
1272  *   Attributes of flow that includes this action.
1273  * @param[out] error
1274  *   Pointer to error structure.
1275  *
1276  * @return
1277  *   0 on success, a negative errno value otherwise and rte_errno is set.
1278  */
1279 int
1280 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1281                                uint64_t action_flags,
1282                                const struct rte_flow_attr *attr,
1283                                struct rte_flow_error *error)
1284 {
1285         const struct rte_flow_action_mark *mark = action->conf;
1286
1287         if (!mark)
1288                 return rte_flow_error_set(error, EINVAL,
1289                                           RTE_FLOW_ERROR_TYPE_ACTION,
1290                                           action,
1291                                           "configuration cannot be null");
1292         if (mark->id >= MLX5_FLOW_MARK_MAX)
1293                 return rte_flow_error_set(error, EINVAL,
1294                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1295                                           &mark->id,
                                          "mark id must be in 0 <= id < "
1297                                           RTE_STR(MLX5_FLOW_MARK_MAX));
1298         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1299                 return rte_flow_error_set(error, EINVAL,
1300                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1301                                           "can't flag and mark in same flow");
1302         if (action_flags & MLX5_FLOW_ACTION_MARK)
1303                 return rte_flow_error_set(error, EINVAL,
1304                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1305                                           "can't have 2 mark actions in same"
1306                                           " flow");
1307         if (attr->egress)
1308                 return rte_flow_error_set(error, ENOTSUP,
1309                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1310                                           "mark action not supported for "
1311                                           "egress");
1312         return 0;
1313 }
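
/*
 * Illustrative sketch, assuming a caller building its first action (so
 * action_flags == 0): a mark action that satisfies the checks above. The
 * id is hypothetical but must stay below MLX5_FLOW_MARK_MAX, and the flow
 * must be ingress.
 */
static int
example_validate_mark(struct rte_flow_error *error)
{
        static const struct rte_flow_action_mark mark = { .id = 0xcafe };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_MARK,
                .conf = &mark,
        };
        const struct rte_flow_attr attr = { .ingress = 1 };

        return mlx5_flow_validate_action_mark(&action, 0, &attr, error);
}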
1314
/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1328 int
1329 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
1330                                const struct rte_flow_attr *attr,
1331                                struct rte_flow_error *error)
1332 {
1333         if (attr->egress)
1334                 return rte_flow_error_set(error, ENOTSUP,
1335                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1336                                           "drop action not supported for "
1337                                           "egress");
1338         return 0;
1339 }
1340
/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1357  */
1358 int
1359 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
1360                                 uint64_t action_flags,
1361                                 struct rte_eth_dev *dev,
1362                                 const struct rte_flow_attr *attr,
1363                                 struct rte_flow_error *error)
1364 {
1365         struct mlx5_priv *priv = dev->data->dev_private;
1366         const struct rte_flow_action_queue *queue = action->conf;
1367
1368         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1369                 return rte_flow_error_set(error, EINVAL,
1370                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1371                                           "can't have 2 fate actions in"
1372                                           " same flow");
1373         if (!priv->rxqs_n)
1374                 return rte_flow_error_set(error, EINVAL,
1375                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1376                                           NULL, "No Rx queues configured");
1377         if (queue->index >= priv->rxqs_n)
1378                 return rte_flow_error_set(error, EINVAL,
1379                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1380                                           &queue->index,
1381                                           "queue index out of range");
1382         if (!(*priv->rxqs)[queue->index])
1383                 return rte_flow_error_set(error, EINVAL,
1384                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1385                                           &queue->index,
1386                                           "queue is not configured");
1387         if (attr->egress)
1388                 return rte_flow_error_set(error, ENOTSUP,
1389                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1390                                           "queue action not supported for "
1391                                           "egress");
1392         return 0;
1393 }
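
/*
 * Illustrative sketch: validating a queue action. The queue index is
 * hypothetical; it must address a configured Rx queue of the port or the
 * checks above fail with "queue index out of range" or "queue is not
 * configured".
 */
static int
example_validate_queue(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        static const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &queue,
        };
        const struct rte_flow_attr attr = { .ingress = 1 };

        return mlx5_flow_validate_action_queue(&action, 0, dev, &attr, error);
}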
1394
/**
 * Validate the RSS action configuration.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1408 int
1409 mlx5_validate_action_rss(struct rte_eth_dev *dev,
1410                          const struct rte_flow_action *action,
1411                          struct rte_flow_error *error)
1412 {
1413         struct mlx5_priv *priv = dev->data->dev_private;
1414         const struct rte_flow_action_rss *rss = action->conf;
1415         enum mlx5_rxq_type rxq_type = MLX5_RXQ_TYPE_UNDEFINED;
1416         unsigned int i;
1417
1418         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1419             rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1420                 return rte_flow_error_set(error, ENOTSUP,
1421                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1422                                           &rss->func,
1423                                           "RSS hash function not supported");
1424 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1425         if (rss->level > 2)
1426 #else
1427         if (rss->level > 1)
1428 #endif
1429                 return rte_flow_error_set(error, ENOTSUP,
1430                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1431                                           &rss->level,
1432                                           "tunnel RSS is not supported");
1433         /* allow RSS key_len 0 in case of NULL (default) RSS key. */
1434         if (rss->key_len == 0 && rss->key != NULL)
1435                 return rte_flow_error_set(error, ENOTSUP,
1436                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1437                                           &rss->key_len,
1438                                           "RSS hash key length 0");
1439         if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1440                 return rte_flow_error_set(error, ENOTSUP,
1441                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1442                                           &rss->key_len,
1443                                           "RSS hash key too small");
1444         if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1445                 return rte_flow_error_set(error, ENOTSUP,
1446                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1447                                           &rss->key_len,
1448                                           "RSS hash key too large");
1449         if (rss->queue_num > priv->config.ind_table_max_size)
1450                 return rte_flow_error_set(error, ENOTSUP,
1451                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1452                                           &rss->queue_num,
1453                                           "number of queues too large");
1454         if (rss->types & MLX5_RSS_HF_MASK)
1455                 return rte_flow_error_set(error, ENOTSUP,
1456                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1457                                           &rss->types,
1458                                           "some RSS protocols are not"
1459                                           " supported");
1460         if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
1461             !(rss->types & ETH_RSS_IP))
1462                 return rte_flow_error_set(error, EINVAL,
1463                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1464                                           "L3 partial RSS requested but L3 RSS"
1465                                           " type not specified");
1466         if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
1467             !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
1468                 return rte_flow_error_set(error, EINVAL,
1469                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1470                                           "L4 partial RSS requested but L4 RSS"
1471                                           " type not specified");
1472         if (!priv->rxqs_n)
1473                 return rte_flow_error_set(error, EINVAL,
1474                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1475                                           NULL, "No Rx queues configured");
1476         if (!rss->queue_num)
1477                 return rte_flow_error_set(error, EINVAL,
1478                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1479                                           NULL, "No queues configured");
1480         for (i = 0; i != rss->queue_num; ++i) {
1481                 struct mlx5_rxq_ctrl *rxq_ctrl;
1482
1483                 if (rss->queue[i] >= priv->rxqs_n)
1484                         return rte_flow_error_set
1485                                 (error, EINVAL,
1486                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1487                                  &rss->queue[i], "queue index out of range");
1488                 if (!(*priv->rxqs)[rss->queue[i]])
1489                         return rte_flow_error_set
1490                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1491                                  &rss->queue[i], "queue is not configured");
1492                 rxq_ctrl = container_of((*priv->rxqs)[rss->queue[i]],
1493                                         struct mlx5_rxq_ctrl, rxq);
1494                 if (i == 0)
1495                         rxq_type = rxq_ctrl->type;
1496                 if (rxq_type != rxq_ctrl->type)
1497                         return rte_flow_error_set
1498                                 (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1499                                  &rss->queue[i],
1500                                  "combining hairpin and regular RSS queues is not supported");
1501         }
1502         return 0;
1503 }
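
/*
 * Illustrative sketch: an RSS configuration that satisfies the constraints
 * above. The queue ids are hypothetical; a NULL key with key_len 0 selects
 * the default hash key and level 1 hashes on the outer headers.
 */
static int
example_validate_rss_conf(struct rte_eth_dev *dev,
                          struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 0, 1 };
        const struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
                .level = 1,
                .types = ETH_RSS_IP | ETH_RSS_UDP,
                .key_len = 0,
                .queue_num = RTE_DIM(queues),
                .key = NULL,
                .queue = queues,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_RSS,
                .conf = &rss,
        };

        return mlx5_validate_action_rss(dev, &action, error);
}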
1504
/**
 * Validate the RSS action in the context of a flow.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1524 int
1525 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
1526                               uint64_t action_flags,
1527                               struct rte_eth_dev *dev,
1528                               const struct rte_flow_attr *attr,
1529                               uint64_t item_flags,
1530                               struct rte_flow_error *error)
1531 {
1532         const struct rte_flow_action_rss *rss = action->conf;
1533         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1534         int ret;
1535
1536         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1537                 return rte_flow_error_set(error, EINVAL,
1538                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1539                                           "can't have 2 fate actions"
1540                                           " in same flow");
1541         ret = mlx5_validate_action_rss(dev, action, error);
1542         if (ret)
1543                 return ret;
1544         if (attr->egress)
1545                 return rte_flow_error_set(error, ENOTSUP,
1546                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1547                                           "rss action not supported for "
1548                                           "egress");
1549         if (rss->level > 1 && !tunnel)
1550                 return rte_flow_error_set(error, EINVAL,
1551                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1552                                           "inner RSS is not supported for "
1553                                           "non-tunnel flows");
1554         if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
1555             !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
1556                 return rte_flow_error_set(error, EINVAL,
1557                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1558                                           "RSS on eCPRI is not supported now");
1559         }
1560         return 0;
1561 }
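
/*
 * Note on the level check above: inner RSS is only accepted when a tunnel
 * item was matched earlier in the pattern, e.g. (hypothetical values):
 *
 *      uint64_t item_flags = MLX5_FLOW_LAYER_VXLAN;
 *      ret = mlx5_flow_validate_action_rss(action, 0, dev, attr,
 *                                          item_flags, error);
 *
 * The same call with item_flags == 0 and rss->level > 1 fails with
 * "inner RSS is not supported for non-tunnel flows".
 */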
1562
/**
 * Validate the default miss action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1574 int
1575 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
1576                                 const struct rte_flow_attr *attr,
1577                                 struct rte_flow_error *error)
1578 {
1579         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1580                 return rte_flow_error_set(error, EINVAL,
1581                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1582                                           "can't have 2 fate actions in"
1583                                           " same flow");
1584         if (attr->egress)
1585                 return rte_flow_error_set(error, ENOTSUP,
1586                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1587                                           "default miss action not supported "
1588                                           "for egress");
1589         if (attr->group)
1590                 return rte_flow_error_set(error, ENOTSUP,
1591                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1592                                           "only group 0 is supported");
1593         if (attr->transfer)
1594                 return rte_flow_error_set(error, ENOTSUP,
1595                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1596                                           NULL, "transfer is not supported");
1597         return 0;
1598 }
1599
/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of the flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1613 int
1614 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1615                                 const struct rte_flow_attr *attr,
1616                                 struct rte_flow_error *error)
1617 {
1618         if (attr->egress)
1619                 return rte_flow_error_set(error, ENOTSUP,
1620                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1621                                           "count action not supported for "
1622                                           "egress");
1623         return 0;
1624 }
1625
1626 /**
 * Verify that the @p attributes will be correctly understood by the NIC.
1629  *
1630  * @param[in] dev
1631  *   Pointer to the Ethernet device structure.
1632  * @param[in] attributes
1633  *   Pointer to flow attributes
1634  * @param[out] error
1635  *   Pointer to error structure.
1636  *
1637  * @return
1638  *   0 on success, a negative errno value otherwise and rte_errno is set.
1639  */
1640 int
1641 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1642                               const struct rte_flow_attr *attributes,
1643                               struct rte_flow_error *error)
1644 {
1645         struct mlx5_priv *priv = dev->data->dev_private;
1646         uint32_t priority_max = priv->config.flow_prio - 1;
1647
1648         if (attributes->group)
1649                 return rte_flow_error_set(error, ENOTSUP,
1650                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL, "groups are not supported");
1652         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1653             attributes->priority >= priority_max)
1654                 return rte_flow_error_set(error, ENOTSUP,
1655                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1656                                           NULL, "priority out of range");
1657         if (attributes->egress)
1658                 return rte_flow_error_set(error, ENOTSUP,
1659                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1660                                           "egress is not supported");
1661         if (attributes->transfer && !priv->config.dv_esw_en)
1662                 return rte_flow_error_set(error, ENOTSUP,
1663                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1664                                           NULL, "transfer is not supported");
1665         if (!attributes->ingress)
1666                 return rte_flow_error_set(error, EINVAL,
1667                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1668                                           NULL,
1669                                           "ingress attribute is mandatory");
1670         return 0;
1671 }
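
/*
 * Illustrative sketch: the only attributes this path accepts are ingress
 * rules in group 0, with a priority below the device limit (or
 * MLX5_FLOW_PRIO_RSVD).
 */
static int
example_validate_attr(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = {
                .group = 0,
                .priority = 0,
                .ingress = 1,
        };

        return mlx5_flow_validate_attributes(dev, &attr, error);
}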
1672
/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1688 int
1689 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1690                                uint64_t item_flags,
1691                                uint8_t target_protocol,
1692                                struct rte_flow_error *error)
1693 {
1694         const struct rte_flow_item_icmp6 *mask = item->mask;
1695         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1696         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1697                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1698         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1699                                       MLX5_FLOW_LAYER_OUTER_L4;
1700         int ret;
1701
1702         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1703                 return rte_flow_error_set(error, EINVAL,
1704                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1705                                           "protocol filtering not compatible"
1706                                           " with ICMP6 layer");
1707         if (!(item_flags & l3m))
1708                 return rte_flow_error_set(error, EINVAL,
1709                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1710                                           "IPv6 is mandatory to filter on"
1711                                           " ICMP6");
1712         if (item_flags & l4m)
1713                 return rte_flow_error_set(error, EINVAL,
1714                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1715                                           "multiple L4 layers not supported");
1716         if (!mask)
1717                 mask = &rte_flow_item_icmp6_mask;
1718         ret = mlx5_flow_item_acceptable
1719                 (item, (const uint8_t *)mask,
1720                  (const uint8_t *)&rte_flow_item_icmp6_mask,
1721                  sizeof(struct rte_flow_item_icmp6),
1722                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1723         if (ret < 0)
1724                 return ret;
1725         return 0;
1726 }
1727
/**
 * Validate ICMP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1741 int
1742 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1743                              uint64_t item_flags,
1744                              uint8_t target_protocol,
1745                              struct rte_flow_error *error)
1746 {
1747         const struct rte_flow_item_icmp *mask = item->mask;
1748         const struct rte_flow_item_icmp nic_mask = {
1749                 .hdr.icmp_type = 0xff,
1750                 .hdr.icmp_code = 0xff,
1751                 .hdr.icmp_ident = RTE_BE16(0xffff),
1752                 .hdr.icmp_seq_nb = RTE_BE16(0xffff),
1753         };
1754         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1755         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1756                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1757         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1758                                       MLX5_FLOW_LAYER_OUTER_L4;
1759         int ret;
1760
1761         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1762                 return rte_flow_error_set(error, EINVAL,
1763                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1764                                           "protocol filtering not compatible"
1765                                           " with ICMP layer");
1766         if (!(item_flags & l3m))
1767                 return rte_flow_error_set(error, EINVAL,
1768                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1769                                           "IPv4 is mandatory to filter"
1770                                           " on ICMP");
1771         if (item_flags & l4m)
1772                 return rte_flow_error_set(error, EINVAL,
1773                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1774                                           "multiple L4 layers not supported");
1775         if (!mask)
1776                 mask = &nic_mask;
1777         ret = mlx5_flow_item_acceptable
1778                 (item, (const uint8_t *)mask,
1779                  (const uint8_t *)&nic_mask,
1780                  sizeof(struct rte_flow_item_icmp),
1781                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1782         if (ret < 0)
1783                 return ret;
1784         return 0;
1785 }
1786
/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1800 int
1801 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1802                             uint64_t item_flags, bool ext_vlan_sup,
1803                             struct rte_flow_error *error)
1804 {
1805         const struct rte_flow_item_eth *mask = item->mask;
1806         const struct rte_flow_item_eth nic_mask = {
1807                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1808                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1809                 .type = RTE_BE16(0xffff),
1810                 .has_vlan = ext_vlan_sup ? 1 : 0,
1811         };
1812         int ret;
1813         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1814         const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1815                                        MLX5_FLOW_LAYER_OUTER_L2;
1816
1817         if (item_flags & ethm)
1818                 return rte_flow_error_set(error, ENOTSUP,
1819                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1820                                           "multiple L2 layers not supported");
1821         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1822             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1823                 return rte_flow_error_set(error, EINVAL,
1824                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1825                                           "L2 layer should not follow "
1826                                           "L3 layers");
1827         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1828             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1829                 return rte_flow_error_set(error, EINVAL,
1830                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1831                                           "L2 layer should not follow VLAN");
1832         if (!mask)
1833                 mask = &rte_flow_item_eth_mask;
1834         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1835                                         (const uint8_t *)&nic_mask,
1836                                         sizeof(struct rte_flow_item_eth),
1837                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1838         return ret;
1839 }
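
/*
 * Illustrative sketch: an Ethernet item matching a single (hypothetical)
 * destination MAC. The mask selects only the destination field, which is
 * within the nic_mask accepted above; no extended VLAN matching assumed.
 */
static int
example_validate_eth(struct rte_flow_error *error)
{
        static const struct rte_flow_item_eth spec = {
                .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        };
        static const struct rte_flow_item_eth mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &spec,
                .mask = &mask,
        };

        return mlx5_flow_validate_item_eth(&item, 0, false, error);
}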
1840
/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Ethernet device the flow is being created on.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1856 int
1857 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1858                              uint64_t item_flags,
1859                              struct rte_eth_dev *dev,
1860                              struct rte_flow_error *error)
1861 {
1862         const struct rte_flow_item_vlan *spec = item->spec;
1863         const struct rte_flow_item_vlan *mask = item->mask;
1864         const struct rte_flow_item_vlan nic_mask = {
1865                 .tci = RTE_BE16(UINT16_MAX),
1866                 .inner_type = RTE_BE16(UINT16_MAX),
1867         };
1868         uint16_t vlan_tag = 0;
1869         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1870         int ret;
1871         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1872                                         MLX5_FLOW_LAYER_INNER_L4) :
1873                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1874                                         MLX5_FLOW_LAYER_OUTER_L4);
1875         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1876                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1877
1878         if (item_flags & vlanm)
1879                 return rte_flow_error_set(error, EINVAL,
1880                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1881                                           "multiple VLAN layers not supported");
1882         else if ((item_flags & l34m) != 0)
1883                 return rte_flow_error_set(error, EINVAL,
1884                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1885                                           "VLAN cannot follow L3/L4 layer");
1886         if (!mask)
1887                 mask = &rte_flow_item_vlan_mask;
1888         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1889                                         (const uint8_t *)&nic_mask,
1890                                         sizeof(struct rte_flow_item_vlan),
1891                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1892         if (ret)
1893                 return ret;
1894         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1895                 struct mlx5_priv *priv = dev->data->dev_private;
1896
1897                 if (priv->vmwa_context) {
                        /*
                         * A non-NULL context means we have a virtual machine
                         * and SR-IOV enabled; we have to create a VLAN
                         * interface to make the hypervisor set up the
                         * E-Switch vport context correctly. We avoid creating
                         * multiple VLAN interfaces, so we cannot support a
                         * VLAN tag mask.
                         */
1905                         return rte_flow_error_set(error, EINVAL,
1906                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1907                                                   item,
1908                                                   "VLAN tag mask is not"
1909                                                   " supported in virtual"
1910                                                   " environment");
1911                 }
1912         }
1913         if (spec) {
1914                 vlan_tag = spec->tci;
1915                 vlan_tag &= mask->tci;
1916         }
1917         /*
1918          * From verbs perspective an empty VLAN is equivalent
1919          * to a packet without VLAN layer.
1920          */
1921         if (!vlan_tag)
1922                 return rte_flow_error_set(error, EINVAL,
1923                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1924                                           item->spec,
1925                                           "VLAN cannot be empty");
1926         return 0;
1927 }
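
/*
 * Illustrative sketch: a VLAN item that passes the checks above. The VLAN
 * id is hypothetical; an all-zero TCI after masking would be rejected as
 * "VLAN cannot be empty". Assumes an outer L2 header was matched first.
 */
static int
example_validate_vlan(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        static const struct rte_flow_item_vlan spec = {
                .tci = RTE_BE16(100),           /* VLAN id 100. */
        };
        static const struct rte_flow_item_vlan mask = {
                .tci = RTE_BE16(0x0fff),        /* Match the VLAN id only. */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
                .spec = &spec,
                .mask = &mask,
        };

        return mlx5_flow_validate_item_vlan(&item, MLX5_FLOW_LAYER_OUTER_L2,
                                            dev, error);
}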
1928
/**
 * Validate IPv4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the Ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   is used to check whether item fields are supported.
 * @param[in] range_accepted
 *   True if a range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
1951 int
1952 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
1953                              uint64_t item_flags,
1954                              uint64_t last_item,
1955                              uint16_t ether_type,
1956                              const struct rte_flow_item_ipv4 *acc_mask,
1957                              bool range_accepted,
1958                              struct rte_flow_error *error)
1959 {
1960         const struct rte_flow_item_ipv4 *mask = item->mask;
1961         const struct rte_flow_item_ipv4 *spec = item->spec;
1962         const struct rte_flow_item_ipv4 nic_mask = {
1963                 .hdr = {
1964                         .src_addr = RTE_BE32(0xffffffff),
1965                         .dst_addr = RTE_BE32(0xffffffff),
1966                         .type_of_service = 0xff,
1967                         .next_proto_id = 0xff,
1968                 },
1969         };
1970         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1971         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1972                                       MLX5_FLOW_LAYER_OUTER_L3;
1973         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1974                                       MLX5_FLOW_LAYER_OUTER_L4;
1975         int ret;
1976         uint8_t next_proto = 0xFF;
1977         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
1978                                   MLX5_FLOW_LAYER_OUTER_VLAN |
1979                                   MLX5_FLOW_LAYER_INNER_VLAN);
1980
1981         if ((last_item & l2_vlan) && ether_type &&
1982             ether_type != RTE_ETHER_TYPE_IPV4)
1983                 return rte_flow_error_set(error, EINVAL,
1984                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1985                                           "IPv4 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv4");
1987         if (item_flags & MLX5_FLOW_LAYER_IPIP) {
1988                 if (mask && spec)
1989                         next_proto = mask->hdr.next_proto_id &
1990                                      spec->hdr.next_proto_id;
1991                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1992                         return rte_flow_error_set(error, EINVAL,
1993                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1994                                                   item,
1995                                                   "multiple tunnel "
1996                                                   "not supported");
1997         }
1998         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
1999                 return rte_flow_error_set(error, EINVAL,
2000                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2001                                           "wrong tunnel type - IPv6 specified "
2002                                           "but IPv4 item provided");
2003         if (item_flags & l3m)
2004                 return rte_flow_error_set(error, ENOTSUP,
2005                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2006                                           "multiple L3 layers not supported");
2007         else if (item_flags & l4m)
2008                 return rte_flow_error_set(error, EINVAL,
2009                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2010                                           "L3 cannot follow an L4 layer.");
2011         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2012                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2013                 return rte_flow_error_set(error, EINVAL,
2014                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2015                                           "L3 cannot follow an NVGRE layer.");
2016         if (!mask)
2017                 mask = &rte_flow_item_ipv4_mask;
2018         else if (mask->hdr.next_proto_id != 0 &&
2019                  mask->hdr.next_proto_id != 0xff)
2020                 return rte_flow_error_set(error, EINVAL,
2021                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2022                                           "partial mask is not supported"
2023                                           " for protocol");
2024         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2025                                         acc_mask ? (const uint8_t *)acc_mask
2026                                                  : (const uint8_t *)&nic_mask,
2027                                         sizeof(struct rte_flow_item_ipv4),
2028                                         range_accepted, error);
2029         if (ret < 0)
2030                 return ret;
2031         return 0;
2032 }
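
/*
 * Illustrative sketch: matching TCP traffic to a (hypothetical) /8 subnet.
 * Note the protocol mask must be 0 or 0xff; a partial next_proto_id mask
 * is rejected above.
 */
static int
example_validate_ipv4(struct rte_flow_error *error)
{
        static const struct rte_flow_item_ipv4 spec = {
                .hdr = {
                        .dst_addr = RTE_BE32(0x0a000000), /* 10.0.0.0 */
                        .next_proto_id = IPPROTO_TCP,
                },
        };
        static const struct rte_flow_item_ipv4 mask = {
                .hdr = {
                        .dst_addr = RTE_BE32(0xff000000), /* /8 prefix. */
                        .next_proto_id = 0xff,
                },
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &spec,
                .mask = &mask,
        };

        /* Assumes the previous item was outer Ethernet with IPv4 type. */
        return mlx5_flow_validate_item_ipv4(&item, MLX5_FLOW_LAYER_OUTER_L2,
                                            MLX5_FLOW_LAYER_OUTER_L2,
                                            RTE_ETHER_TYPE_IPV4, NULL,
                                            MLX5_ITEM_RANGE_NOT_ACCEPTED,
                                            error);
}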
2033
/**
 * Validate IPv6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the Ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   is used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2054 int
2055 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2056                              uint64_t item_flags,
2057                              uint64_t last_item,
2058                              uint16_t ether_type,
2059                              const struct rte_flow_item_ipv6 *acc_mask,
2060                              struct rte_flow_error *error)
2061 {
2062         const struct rte_flow_item_ipv6 *mask = item->mask;
2063         const struct rte_flow_item_ipv6 *spec = item->spec;
2064         const struct rte_flow_item_ipv6 nic_mask = {
2065                 .hdr = {
2066                         .src_addr =
2067                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2068                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2069                         .dst_addr =
2070                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2071                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2072                         .vtc_flow = RTE_BE32(0xffffffff),
2073                         .proto = 0xff,
2074                 },
2075         };
2076         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2077         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2078                                       MLX5_FLOW_LAYER_OUTER_L3;
2079         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2080                                       MLX5_FLOW_LAYER_OUTER_L4;
2081         int ret;
2082         uint8_t next_proto = 0xFF;
2083         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2084                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2085                                   MLX5_FLOW_LAYER_INNER_VLAN);
2086
2087         if ((last_item & l2_vlan) && ether_type &&
2088             ether_type != RTE_ETHER_TYPE_IPV6)
2089                 return rte_flow_error_set(error, EINVAL,
2090                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2091                                           "IPv6 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv6");
2093         if (mask && mask->hdr.proto == UINT8_MAX && spec)
2094                 next_proto = spec->hdr.proto;
2095         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2096                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2097                         return rte_flow_error_set(error, EINVAL,
2098                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2099                                                   item,
2100                                                   "multiple tunnel "
2101                                                   "not supported");
2102         }
2103         if (next_proto == IPPROTO_HOPOPTS  ||
2104             next_proto == IPPROTO_ROUTING  ||
2105             next_proto == IPPROTO_FRAGMENT ||
2106             next_proto == IPPROTO_ESP      ||
2107             next_proto == IPPROTO_AH       ||
2108             next_proto == IPPROTO_DSTOPTS)
2109                 return rte_flow_error_set(error, EINVAL,
2110                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2111                                           "IPv6 proto (next header) should "
2112                                           "not be set as extension header");
2113         if (item_flags & MLX5_FLOW_LAYER_IPIP)
2114                 return rte_flow_error_set(error, EINVAL,
2115                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2116                                           "wrong tunnel type - IPv4 specified "
2117                                           "but IPv6 item provided");
2118         if (item_flags & l3m)
2119                 return rte_flow_error_set(error, ENOTSUP,
2120                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2121                                           "multiple L3 layers not supported");
2122         else if (item_flags & l4m)
2123                 return rte_flow_error_set(error, EINVAL,
2124                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2125                                           "L3 cannot follow an L4 layer.");
2126         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2127                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2128                 return rte_flow_error_set(error, EINVAL,
2129                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2130                                           "L3 cannot follow an NVGRE layer.");
2131         if (!mask)
2132                 mask = &rte_flow_item_ipv6_mask;
2133         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2134                                         acc_mask ? (const uint8_t *)acc_mask
2135                                                  : (const uint8_t *)&nic_mask,
2136                                         sizeof(struct rte_flow_item_ipv6),
2137                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2138         if (ret < 0)
2139                 return ret;
2140         return 0;
2141 }
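
/*
 * Note on the extension-header check above: a pattern that pins the proto
 * field to an extension header, e.g. (hypothetical values)
 *
 *      spec.hdr.proto = IPPROTO_HOPOPTS;
 *      mask.hdr.proto = 0xff;
 *
 * is rejected, while matching a final protocol such as IPPROTO_UDP is
 * accepted.
 */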
2142
/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2160 int
2161 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2162                             uint64_t item_flags,
2163                             uint8_t target_protocol,
2164                             struct rte_flow_error *error)
2165 {
2166         const struct rte_flow_item_udp *mask = item->mask;
2167         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2168         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2169                                       MLX5_FLOW_LAYER_OUTER_L3;
2170         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2171                                       MLX5_FLOW_LAYER_OUTER_L4;
2172         int ret;
2173
2174         if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2175                 return rte_flow_error_set(error, EINVAL,
2176                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2177                                           "protocol filtering not compatible"
2178                                           " with UDP layer");
2179         if (!(item_flags & l3m))
2180                 return rte_flow_error_set(error, EINVAL,
2181                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2182                                           "L3 is mandatory to filter on L4");
2183         if (item_flags & l4m)
2184                 return rte_flow_error_set(error, EINVAL,
2185                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2186                                           "multiple L4 layers not supported");
2187         if (!mask)
2188                 mask = &rte_flow_item_udp_mask;
2189         ret = mlx5_flow_item_acceptable
2190                 (item, (const uint8_t *)mask,
2191                  (const uint8_t *)&rte_flow_item_udp_mask,
2192                  sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2193                  error);
2194         if (ret < 0)
2195                 return ret;
2196         return 0;
2197 }
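
/*
 * Illustrative sketch: a UDP item for a hypothetical destination port.
 * The "L3 is mandatory" rule above means item_flags must already carry an
 * L3 bit, and the previous IPv4/IPv6 item must not pin another protocol.
 */
static int
example_validate_udp(struct rte_flow_error *error)
{
        static const struct rte_flow_item_udp spec = {
                .hdr.dst_port = RTE_BE16(4789), /* e.g. the VXLAN port. */
        };
        static const struct rte_flow_item_udp mask = {
                .hdr.dst_port = RTE_BE16(0xffff),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .spec = &spec,
                .mask = &mask,
        };

        return mlx5_flow_validate_item_udp(&item,
                                           MLX5_FLOW_LAYER_OUTER_L3_IPV4,
                                           IPPROTO_UDP, error);
}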
2198
/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2214 int
2215 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2216                             uint64_t item_flags,
2217                             uint8_t target_protocol,
2218                             const struct rte_flow_item_tcp *flow_mask,
2219                             struct rte_flow_error *error)
2220 {
2221         const struct rte_flow_item_tcp *mask = item->mask;
2222         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2223         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2224                                       MLX5_FLOW_LAYER_OUTER_L3;
2225         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2226                                       MLX5_FLOW_LAYER_OUTER_L4;
2227         int ret;
2228
2229         MLX5_ASSERT(flow_mask);
2230         if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2231                 return rte_flow_error_set(error, EINVAL,
2232                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2233                                           "protocol filtering not compatible"
2234                                           " with TCP layer");
2235         if (!(item_flags & l3m))
2236                 return rte_flow_error_set(error, EINVAL,
2237                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2238                                           "L3 is mandatory to filter on L4");
2239         if (item_flags & l4m)
2240                 return rte_flow_error_set(error, EINVAL,
2241                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2242                                           "multiple L4 layers not supported");
2243         if (!mask)
2244                 mask = &rte_flow_item_tcp_mask;
2245         ret = mlx5_flow_item_acceptable
2246                 (item, (const uint8_t *)mask,
2247                  (const uint8_t *)flow_mask,
2248                  sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2249                  error);
2250         if (ret < 0)
2251                 return ret;
2252         return 0;
2253 }
2254
/**
 * Validate VXLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2270 int
2271 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2272                               uint64_t item_flags,
2273                               struct rte_flow_error *error)
2274 {
2275         const struct rte_flow_item_vxlan *spec = item->spec;
2276         const struct rte_flow_item_vxlan *mask = item->mask;
2277         int ret;
2278         union vni {
2279                 uint32_t vlan_id;
2280                 uint8_t vni[4];
2281         } id = { .vlan_id = 0, };
2282
        if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2285                 return rte_flow_error_set(error, ENOTSUP,
2286                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2287                                           "multiple tunnel layers not"
2288                                           " supported");
2289         /*
2290          * Verify only UDPv4 is present as defined in
2291          * https://tools.ietf.org/html/rfc7348
2292          */
2293         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2294                 return rte_flow_error_set(error, EINVAL,
2295                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2296                                           "no outer UDP layer found");
2297         if (!mask)
2298                 mask = &rte_flow_item_vxlan_mask;
2299         ret = mlx5_flow_item_acceptable
2300                 (item, (const uint8_t *)mask,
2301                  (const uint8_t *)&rte_flow_item_vxlan_mask,
2302                  sizeof(struct rte_flow_item_vxlan),
2303                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2304         if (ret < 0)
2305                 return ret;
        if (spec) {
                /* Combine the 24-bit VNI with its mask byte-wise. */
                id.vni[1] = spec->vni[0] & mask->vni[0];
                id.vni[2] = spec->vni[1] & mask->vni[1];
                id.vni[3] = spec->vni[2] & mask->vni[2];
        }
2310         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2311                 return rte_flow_error_set(error, ENOTSUP,
2312                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2313                                           "VXLAN tunnel must be fully defined");
2314         return 0;
2315 }
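
/*
 * Illustrative sketch: a VXLAN item for a hypothetical VNI. The checks
 * above require the full outer stack (L2/L3/UDP) to have been matched
 * before this item.
 */
static int
example_validate_vxlan(struct rte_flow_error *error)
{
        static const struct rte_flow_item_vxlan spec = {
                .vni = "\x00\x00\x2a",  /* VNI 42 in network byte order. */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .spec = &spec,
                .mask = &rte_flow_item_vxlan_mask, /* VNI fully masked. */
        };
        const uint64_t outer = MLX5_FLOW_LAYER_OUTER_L2 |
                               MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
                               MLX5_FLOW_LAYER_OUTER_L4_UDP;

        return mlx5_flow_validate_item_vxlan(&item, outer, error);
}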
2316
/**
 * Validate VXLAN_GPE item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
2334 int
2335 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2336                                   uint64_t item_flags,
2337                                   struct rte_eth_dev *dev,
2338                                   struct rte_flow_error *error)
2339 {
2340         struct mlx5_priv *priv = dev->data->dev_private;
2341         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2342         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2343         int ret;
2344         union vni {
2345                 uint32_t vlan_id;
2346                 uint8_t vni[4];
2347         } id = { .vlan_id = 0, };
2348
2349         if (!priv->config.l3_vxlan_en)
2350                 return rte_flow_error_set(error, ENOTSUP,
2351                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2352                                           "L3 VXLAN is not enabled by device"
2353                                           " parameter and/or not configured in"
2354                                           " firmware");
2355         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2356                 return rte_flow_error_set(error, ENOTSUP,
2357                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2358                                           "multiple tunnel layers not"
2359                                           " supported");
2360         /*
2361          * Verify only UDPv4 is present as defined in
2362          * https://tools.ietf.org/html/rfc7348
2363          */
2364         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2365                 return rte_flow_error_set(error, EINVAL,
2366                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2367                                           "no outer UDP layer found");
2368         if (!mask)
2369                 mask = &rte_flow_item_vxlan_gpe_mask;
2370         ret = mlx5_flow_item_acceptable
2371                 (item, (const uint8_t *)mask,
2372                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2373                  sizeof(struct rte_flow_item_vxlan_gpe),
2374                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2375         if (ret < 0)
2376                 return ret;
2377         if (spec) {
2378                 if (spec->protocol)
2379                         return rte_flow_error_set(error, ENOTSUP,
2380                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2381                                                   item,
2382                                                   "VxLAN-GPE protocol"
2383                                                   " not supported");
                /* Combine the 24-bit VNI with its mask byte-wise. */
                id.vni[1] = spec->vni[0] & mask->vni[0];
                id.vni[2] = spec->vni[1] & mask->vni[1];
                id.vni[3] = spec->vni[2] & mask->vni[2];
2386         }
2387         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2388                 return rte_flow_error_set(error, ENOTSUP,
2389                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2390                                           "VXLAN-GPE tunnel must be fully"
2391                                           " defined");
2392         return 0;
}

/**
2395  * Validate GRE Key item.
2396  *
2397  * @param[in] item
2398  *   Item specification.
2399  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] gre_item
 *   Pointer to the GRE item that precedes this key.
2403  * @param[out] error
2404  *   Pointer to error structure.
2405  *
2406  * @return
2407  *   0 on success, a negative errno value otherwise and rte_errno is set.
2408  */
2409 int
2410 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2411                                 uint64_t item_flags,
2412                                 const struct rte_flow_item *gre_item,
2413                                 struct rte_flow_error *error)
2414 {
2415         const rte_be32_t *mask = item->mask;
2416         int ret = 0;
2417         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2418         const struct rte_flow_item_gre *gre_spec;
2419         const struct rte_flow_item_gre *gre_mask;
2420
2421         if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2422                 return rte_flow_error_set(error, ENOTSUP,
2423                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Multiple GRE keys not supported");
2425         if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2426                 return rte_flow_error_set(error, ENOTSUP,
2427                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2428                                           "No preceding GRE header");
2429         if (item_flags & MLX5_FLOW_LAYER_INNER)
2430                 return rte_flow_error_set(error, ENOTSUP,
2431                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2432                                           "GRE key following a wrong item");
2433         gre_mask = gre_item->mask;
2434         if (!gre_mask)
2435                 gre_mask = &rte_flow_item_gre_mask;
2436         gre_spec = gre_item->spec;
2437         if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2438                          !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2439                 return rte_flow_error_set(error, EINVAL,
2440                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2441                                           "Key bit must be on");
2442
2443         if (!mask)
2444                 mask = &gre_key_default_mask;
2445         ret = mlx5_flow_item_acceptable
2446                 (item, (const uint8_t *)mask,
2447                  (const uint8_t *)&gre_key_default_mask,
2448                  sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2449         return ret;
2450 }
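
/*
 * Illustrative sketch (hypothetical values): a GRE_KEY item must follow a
 * GRE item whose spec/mask advertise the key bit, which is what the
 * RTE_BE16(0x2000) test above enforces.
 */
static int
example_validate_gre_key(struct rte_flow_error *error)
{
        static const struct rte_flow_item_gre gre_spec = {
                .c_rsvd0_ver = RTE_BE16(0x2000),        /* K bit set. */
        };
        static const struct rte_flow_item_gre gre_mask = {
                .c_rsvd0_ver = RTE_BE16(0x2000),
        };
        const struct rte_flow_item gre_item = {
                .type = RTE_FLOW_ITEM_TYPE_GRE,
                .spec = &gre_spec,
                .mask = &gre_mask,
        };
        static const rte_be32_t key = RTE_BE32(0xdeadbeef);
        const struct rte_flow_item key_item = {
                .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
                .spec = &key,
        };

        return mlx5_flow_validate_item_gre_key(&key_item,
                                               MLX5_FLOW_LAYER_GRE,
                                               &gre_item, error);
}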
2451
2452 /**
2453  * Validate GRE item.
2454  *
2455  * @param[in] item
2456  *   Item specification.
2457  * @param[in] item_flags
2458  *   Bit flags to mark detected items.
2459  * @param[in] target_protocol
2460  *   The next protocol in the previous item.
2461  * @param[out] error
2462  *   Pointer to error structure.
2463  *
2464  * @return
2465  *   0 on success, a negative errno value otherwise and rte_errno is set.
2466  */
2467 int
2468 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2469                             uint64_t item_flags,
2470                             uint8_t target_protocol,
2471                             struct rte_flow_error *error)
2472 {
2473         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2474         const struct rte_flow_item_gre *mask = item->mask;
2475         int ret;
2476         const struct rte_flow_item_gre nic_mask = {
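                /* Matches the C (0x8000), K (0x2000) and S (0x1000) flag
                 * bits of the GRE header.
                 */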
2477                 .c_rsvd0_ver = RTE_BE16(0xB000),
2478                 .protocol = RTE_BE16(UINT16_MAX),
2479         };
2480
2481         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2482                 return rte_flow_error_set(error, EINVAL,
2483                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2484                                           "protocol filtering not compatible"
2485                                           " with this GRE layer");
2486         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2487                 return rte_flow_error_set(error, ENOTSUP,
2488                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2489                                           "multiple tunnel layers not"
2490                                           " supported");
2491         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2492                 return rte_flow_error_set(error, ENOTSUP,
2493                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2494                                           "L3 Layer is missing");
2495         if (!mask)
2496                 mask = &rte_flow_item_gre_mask;
2497         ret = mlx5_flow_item_acceptable
2498                 (item, (const uint8_t *)mask,
2499                  (const uint8_t *)&nic_mask,
2500                  sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2501                  error);
2502         if (ret < 0)
2503                 return ret;
2504 #ifndef HAVE_MLX5DV_DR
2505 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2506         if (spec && (spec->protocol & mask->protocol))
2507                 return rte_flow_error_set(error, ENOTSUP,
2508                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2509                                           "without MPLS support the"
2510                                           " specification cannot be used for"
2511                                           " filtering");
2512 #endif
2513 #endif
2514         return 0;
2515 }
2516
2517 /**
2518  * Validate Geneve item.
2519  *
2520  * @param[in] item
2521  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
2526  * @param[out] error
2527  *   Pointer to error structure.
2528  *
2529  * @return
2530  *   0 on success, a negative errno value otherwise and rte_errno is set.
2531  */
2533 int
2534 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2535                                uint64_t item_flags,
2536                                struct rte_eth_dev *dev,
2537                                struct rte_flow_error *error)
2538 {
2539         struct mlx5_priv *priv = dev->data->dev_private;
2540         const struct rte_flow_item_geneve *spec = item->spec;
2541         const struct rte_flow_item_geneve *mask = item->mask;
2542         int ret;
2543         uint16_t gbhdr;
2544         uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2545                           MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2546         const struct rte_flow_item_geneve nic_mask = {
2547                 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2548                 .vni = "\xff\xff\xff",
2549                 .protocol = RTE_BE16(UINT16_MAX),
2550         };
2551
2552         if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2553                 return rte_flow_error_set(error, ENOTSUP,
2554                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2555                                           "L3 Geneve is not enabled by device"
2556                                           " parameter and/or not configured in"
2557                                           " firmware");
2558         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2559                 return rte_flow_error_set(error, ENOTSUP,
2560                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2561                                           "multiple tunnel layers not"
2562                                           " supported");
        /*
         * Verify an outer UDP header is present, as required by the
         * Geneve specification (RFC 8926).
         */
2567         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2568                 return rte_flow_error_set(error, EINVAL,
2569                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2570                                           "no outer UDP layer found");
2571         if (!mask)
2572                 mask = &rte_flow_item_geneve_mask;
2573         ret = mlx5_flow_item_acceptable
2574                                   (item, (const uint8_t *)mask,
2575                                    (const uint8_t *)&nic_mask,
2576                                    sizeof(struct rte_flow_item_geneve),
2577                                    MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2578         if (ret)
2579                 return ret;
2580         if (spec) {
2581                 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2582                 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2583                      MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2584                      MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2585                         return rte_flow_error_set(error, ENOTSUP,
2586                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2587                                                   item,
2588                                                   "Geneve protocol unsupported"
2589                                                   " fields are being used");
2590                 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2591                         return rte_flow_error_set
2592                                         (error, ENOTSUP,
2593                                          RTE_FLOW_ERROR_TYPE_ITEM,
2594                                          item,
2595                                          "Unsupported Geneve options length");
2596         }
2597         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2598                 return rte_flow_error_set
2599                                     (error, ENOTSUP,
2600                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
2601                                      "Geneve tunnel must be fully defined");
2602         return 0;
2603 }
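/*
 * Illustrative sketch (editorial addition, not part of the driver): a
 * fully defined Geneve pattern that satisfies the checks above, i.e. an
 * outer L2/L3/UDP chain followed by the Geneve item. The VNI value is
 * hypothetical.
 *
 * @code
 * const struct rte_flow_item_geneve geneve_spec = {
 *         .vni = "\x00\x12\x34", // Hypothetical 24-bit VNI.
 * };
 * const struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *         { .type = RTE_FLOW_ITEM_TYPE_GENEVE, .spec = &geneve_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */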
2604
2605 /**
2606  * Validate MPLS item.
2607  *
2608  * @param[in] dev
2609  *   Pointer to the rte_eth_dev structure.
2610  * @param[in] item
2611  *   Item specification.
2612  * @param[in] item_flags
2613  *   Bit-fields that holds the items detected until now.
2614  * @param[in] prev_layer
2615  *   The protocol layer indicated in previous item.
2616  * @param[out] error
2617  *   Pointer to error structure.
2618  *
2619  * @return
2620  *   0 on success, a negative errno value otherwise and rte_errno is set.
2621  */
2622 int
2623 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2624                              const struct rte_flow_item *item __rte_unused,
2625                              uint64_t item_flags __rte_unused,
2626                              uint64_t prev_layer __rte_unused,
2627                              struct rte_flow_error *error)
2628 {
2629 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2630         const struct rte_flow_item_mpls *mask = item->mask;
2631         struct mlx5_priv *priv = dev->data->dev_private;
2632         int ret;
2633
2634         if (!priv->config.mpls_en)
2635                 return rte_flow_error_set(error, ENOTSUP,
2636                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2637                                           "MPLS not supported or"
2638                                           " disabled in firmware"
2639                                           " configuration.");
2640         /* MPLS over IP, UDP, GRE is allowed */
2641         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2642                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
2643                             MLX5_FLOW_LAYER_GRE |
2644                             MLX5_FLOW_LAYER_GRE_KEY)))
2645                 return rte_flow_error_set(error, EINVAL,
2646                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2647                                           "protocol filtering not compatible"
2648                                           " with MPLS layer");
2649         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2650         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2651             !(item_flags & MLX5_FLOW_LAYER_GRE))
2652                 return rte_flow_error_set(error, ENOTSUP,
2653                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2654                                           "multiple tunnel layers not"
2655                                           " supported");
2656         if (!mask)
2657                 mask = &rte_flow_item_mpls_mask;
2658         ret = mlx5_flow_item_acceptable
2659                 (item, (const uint8_t *)mask,
2660                  (const uint8_t *)&rte_flow_item_mpls_mask,
2661                  sizeof(struct rte_flow_item_mpls),
2662                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2663         if (ret < 0)
2664                 return ret;
2665         return 0;
2666 #else
2667         return rte_flow_error_set(error, ENOTSUP,
2668                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
2669                                   "MPLS is not supported by Verbs, please"
2670                                   " update.");
2671 #endif
2672 }
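/*
 * Illustrative sketch (editorial addition, not part of the driver):
 * MPLS-over-GRE, the one multi-tunnel combination tolerated by the
 * validation above. The label value is hypothetical.
 *
 * @code
 * const struct rte_flow_item_mpls mpls_spec = {
 *         .label_tc_s = "\x00\x01\x01", // Hypothetical label, S bit set.
 * };
 * const struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *         { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *         { .type = RTE_FLOW_ITEM_TYPE_GRE },
 *         { .type = RTE_FLOW_ITEM_TYPE_MPLS, .spec = &mpls_spec },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */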
2673
2674 /**
2675  * Validate NVGRE item.
2676  *
2677  * @param[in] item
2678  *   Item specification.
2679  * @param[in] item_flags
2680  *   Bit flags to mark detected items.
2681  * @param[in] target_protocol
2682  *   The next protocol in the previous item.
2683  * @param[out] error
2684  *   Pointer to error structure.
2685  *
2686  * @return
2687  *   0 on success, a negative errno value otherwise and rte_errno is set.
2688  */
2689 int
2690 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2691                               uint64_t item_flags,
2692                               uint8_t target_protocol,
2693                               struct rte_flow_error *error)
2694 {
2695         const struct rte_flow_item_nvgre *mask = item->mask;
2696         int ret;
2697
2698         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2699                 return rte_flow_error_set(error, EINVAL,
2700                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2701                                           "protocol filtering not compatible"
2702                                           " with this GRE layer");
2703         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2704                 return rte_flow_error_set(error, ENOTSUP,
2705                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2706                                           "multiple tunnel layers not"
2707                                           " supported");
2708         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2709                 return rte_flow_error_set(error, ENOTSUP,
2710                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2711                                           "L3 Layer is missing");
2712         if (!mask)
2713                 mask = &rte_flow_item_nvgre_mask;
2714         ret = mlx5_flow_item_acceptable
2715                 (item, (const uint8_t *)mask,
2716                  (const uint8_t *)&rte_flow_item_nvgre_mask,
2717                  sizeof(struct rte_flow_item_nvgre),
2718                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2719         if (ret < 0)
2720                 return ret;
2721         return 0;
2722 }
2723
2724 /**
2725  * Validate eCPRI item.
2726  *
2727  * @param[in] item
2728  *   Item specification.
2729  * @param[in] item_flags
2730  *   Bit-fields that holds the items detected until now.
2731  * @param[in] last_item
2732  *   Previous validated item in the pattern items.
2733  * @param[in] ether_type
2734  *   Type in the ethernet layer header (including dot1q).
2735  * @param[in] acc_mask
 *   Acceptable mask. If NULL, the default internal mask
 *   will be used to check whether the item fields are supported.
2738  * @param[out] error
2739  *   Pointer to error structure.
2740  *
2741  * @return
2742  *   0 on success, a negative errno value otherwise and rte_errno is set.
2743  */
2744 int
2745 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2746                               uint64_t item_flags,
2747                               uint64_t last_item,
2748                               uint16_t ether_type,
2749                               const struct rte_flow_item_ecpri *acc_mask,
2750                               struct rte_flow_error *error)
2751 {
2752         const struct rte_flow_item_ecpri *mask = item->mask;
2753         const struct rte_flow_item_ecpri nic_mask = {
2754                 .hdr = {
2755                         .common = {
2756                                 .u32 =
2757                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
2758                                         .type = 0xFF,
2759                                         }).u32),
2760                         },
2761                         .dummy[0] = 0xFFFFFFFF,
2762                 },
2763         };
2764         const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2765                                         MLX5_FLOW_LAYER_OUTER_VLAN);
2766         struct rte_flow_item_ecpri mask_lo;
2767
2768         if (!(last_item & outer_l2_vlan) &&
2769             last_item != MLX5_FLOW_LAYER_OUTER_L4_UDP)
2770                 return rte_flow_error_set(error, EINVAL,
2771                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2772                                           "eCPRI can only follow L2/VLAN layer or UDP layer");
2773         if ((last_item & outer_l2_vlan) && ether_type &&
2774             ether_type != RTE_ETHER_TYPE_ECPRI)
2775                 return rte_flow_error_set(error, EINVAL,
2776                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "eCPRI cannot follow an L2/VLAN layer whose ether type is not 0xAEFE");
2778         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2779                 return rte_flow_error_set(error, EINVAL,
2780                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2781                                           "eCPRI with tunnel is not supported right now");
2782         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2783                 return rte_flow_error_set(error, ENOTSUP,
2784                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2785                                           "multiple L3 layers not supported");
2786         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2787                 return rte_flow_error_set(error, EINVAL,
2788                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2789                                           "eCPRI cannot coexist with a TCP layer");
        /* Per the specification, eCPRI can also be carried over UDP. */
2791         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2792                 return rte_flow_error_set(error, EINVAL,
2793                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "eCPRI over UDP layer is not yet supported");
2795         /* Mask for type field in common header could be zero. */
2796         if (!mask)
2797                 mask = &rte_flow_item_ecpri_mask;
2798         mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
2799         /* Input mask is in big-endian format. */
2800         if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
2801                 return rte_flow_error_set(error, EINVAL,
2802                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2803                                           "partial mask is not supported for protocol");
2804         else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
2805                 return rte_flow_error_set(error, EINVAL,
2806                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2807                                           "message header mask must be after a type mask");
2808         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2809                                          acc_mask ? (const uint8_t *)acc_mask
2810                                                   : (const uint8_t *)&nic_mask,
2811                                          sizeof(struct rte_flow_item_ecpri),
2812                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2813 }
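/*
 * Illustrative sketch (editorial addition, not part of the driver): an
 * eCPRI-over-Ethernet item with a full mask on the message type, the only
 * mask granularity accepted above for the common header. The construction
 * mirrors the nic_mask in this function; the message type is hypothetical.
 *
 * @code
 * const struct rte_flow_item_ecpri ecpri_spec = {
 *         .hdr = {
 *                 .common = {
 *                         .u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
 *                                 .type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *                                 }).u32),
 *                 },
 *         },
 * };
 * const struct rte_flow_item_ecpri ecpri_mask = {
 *         .hdr = {
 *                 .common = {
 *                         .u32 = RTE_BE32(((const struct rte_ecpri_common_hdr) {
 *                                 .type = 0xFF, // Full mask on type only.
 *                                 }).u32),
 *                 },
 *         },
 * };
 * const struct rte_flow_item pattern[] = {
 *         { .type = RTE_FLOW_ITEM_TYPE_ETH }, // Ether type 0xAEFE assumed.
 *         { .type = RTE_FLOW_ITEM_TYPE_ECPRI,
 *           .spec = &ecpri_spec, .mask = &ecpri_mask },
 *         { .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * @endcode
 */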
2814
2815 /**
 * Release resources related to the QUEUE/RSS action split.
2817  *
2818  * @param dev
2819  *   Pointer to Ethernet device.
2820  * @param flow
2821  *   Flow to release id's from.
2822  */
2823 static void
2824 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2825                              struct rte_flow *flow)
2826 {
2827         struct mlx5_priv *priv = dev->data->dev_private;
2828         uint32_t handle_idx;
2829         struct mlx5_flow_handle *dev_handle;
2830
2831         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2832                        handle_idx, dev_handle, next)
2833                 if (dev_handle->split_flow_id)
2834                         mlx5_ipool_free(priv->sh->ipool
2835                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
2836                                         dev_handle->split_flow_id);
2837 }
2838
2839 static int
2840 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2841                    const struct rte_flow_attr *attr __rte_unused,
2842                    const struct rte_flow_item items[] __rte_unused,
2843                    const struct rte_flow_action actions[] __rte_unused,
2844                    bool external __rte_unused,
2845                    int hairpin __rte_unused,
2846                    struct rte_flow_error *error)
2847 {
2848         return rte_flow_error_set(error, ENOTSUP,
2849                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2850 }
2851
2852 static struct mlx5_flow *
2853 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2854                   const struct rte_flow_attr *attr __rte_unused,
2855                   const struct rte_flow_item items[] __rte_unused,
2856                   const struct rte_flow_action actions[] __rte_unused,
2857                   struct rte_flow_error *error)
2858 {
2859         rte_flow_error_set(error, ENOTSUP,
2860                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2861         return NULL;
2862 }
2863
2864 static int
2865 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2866                     struct mlx5_flow *dev_flow __rte_unused,
2867                     const struct rte_flow_attr *attr __rte_unused,
2868                     const struct rte_flow_item items[] __rte_unused,
2869                     const struct rte_flow_action actions[] __rte_unused,
2870                     struct rte_flow_error *error)
2871 {
2872         return rte_flow_error_set(error, ENOTSUP,
2873                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2874 }
2875
2876 static int
2877 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2878                 struct rte_flow *flow __rte_unused,
2879                 struct rte_flow_error *error)
2880 {
2881         return rte_flow_error_set(error, ENOTSUP,
2882                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2883 }
2884
2885 static void
2886 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2887                  struct rte_flow *flow __rte_unused)
2888 {
2889 }
2890
2891 static void
2892 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2893                   struct rte_flow *flow __rte_unused)
2894 {
2895 }
2896
2897 static int
2898 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2899                 struct rte_flow *flow __rte_unused,
2900                 const struct rte_flow_action *actions __rte_unused,
2901                 void *data __rte_unused,
2902                 struct rte_flow_error *error)
2903 {
2904         return rte_flow_error_set(error, ENOTSUP,
2905                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2906 }
2907
2908 static int
2909 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
2910                       uint32_t domains __rte_unused,
2911                       uint32_t flags __rte_unused)
2912 {
2913         return 0;
2914 }
2915
2916 /* Void driver to protect from null pointer reference. */
2917 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2918         .validate = flow_null_validate,
2919         .prepare = flow_null_prepare,
2920         .translate = flow_null_translate,
2921         .apply = flow_null_apply,
2922         .remove = flow_null_remove,
2923         .destroy = flow_null_destroy,
2924         .query = flow_null_query,
2925         .sync_domain = flow_null_sync_domain,
2926 };
2927
2928 /**
2929  * Select flow driver type according to flow attributes and device
2930  * configuration.
2931  *
2932  * @param[in] dev
2933  *   Pointer to the dev structure.
2934  * @param[in] attr
2935  *   Pointer to the flow attributes.
2936  *
2937  * @return
2938  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
2939  */
2940 static enum mlx5_flow_drv_type
2941 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2942 {
2943         struct mlx5_priv *priv = dev->data->dev_private;
2944         /* The OS can determine first a specific flow type (DV, VERBS) */
2945         enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
2946
2947         if (type != MLX5_FLOW_TYPE_MAX)
2948                 return type;
2949         /* If no OS specific type - continue with DV/VERBS selection */
2950         if (attr->transfer && priv->config.dv_esw_en)
2951                 type = MLX5_FLOW_TYPE_DV;
2952         if (!attr->transfer)
2953                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2954                                                  MLX5_FLOW_TYPE_VERBS;
2955         return type;
2956 }
2957
2958 #define flow_get_drv_ops(type) flow_drv_ops[type]
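/*
 * Illustrative sketch (not part of the driver): how the wrappers below
 * dispatch through the ops table; out-of-range or unsupported types fall
 * back to the null driver registered above. The variables are assumed to
 * be the ones in scope in flow_drv_validate().
 *
 * @code
 * const struct mlx5_flow_driver_ops *fops =
 *         flow_get_drv_ops(flow_get_drv_type(dev, attr));
 * int ret = fops->validate(dev, attr, items, actions, external,
 *                          hairpin, error);
 * @endcode
 */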
2959
2960 /**
2961  * Flow driver validation API. This abstracts calling driver specific functions.
2962  * The type of flow driver is determined according to flow attributes.
2963  *
2964  * @param[in] dev
2965  *   Pointer to the dev structure.
2966  * @param[in] attr
2967  *   Pointer to the flow attributes.
2968  * @param[in] items
2969  *   Pointer to the list of items.
2970  * @param[in] actions
2971  *   Pointer to the list of actions.
2972  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
2974  * @param[in] hairpin
2975  *   Number of hairpin TX actions, 0 means classic flow.
2976  * @param[out] error
2977  *   Pointer to the error structure.
2978  *
2979  * @return
2980  *   0 on success, a negative errno value otherwise and rte_errno is set.
2981  */
2982 static inline int
2983 flow_drv_validate(struct rte_eth_dev *dev,
2984                   const struct rte_flow_attr *attr,
2985                   const struct rte_flow_item items[],
2986                   const struct rte_flow_action actions[],
2987                   bool external, int hairpin, struct rte_flow_error *error)
2988 {
2989         const struct mlx5_flow_driver_ops *fops;
2990         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2991
2992         fops = flow_get_drv_ops(type);
2993         return fops->validate(dev, attr, items, actions, external,
2994                               hairpin, error);
2995 }
2996
2997 /**
2998  * Flow driver preparation API. This abstracts calling driver specific
2999  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3000  * calculates the size of memory required for device flow, allocates the memory,
3001  * initializes the device flow and returns the pointer.
3002  *
3003  * @note
 *   This function initializes the device flow structure, such as dv or verbs
 *   in struct mlx5_flow. However, it is the caller's responsibility to
 *   initialize the rest. For example, adding the returned device flow to the
 *   flow->dev_flow list and setting the backward reference to the flow should
 *   be done outside of this function. The layers field is not filled either.
3009  *
3010  * @param[in] dev
3011  *   Pointer to the dev structure.
3012  * @param[in] attr
3013  *   Pointer to the flow attributes.
3014  * @param[in] items
3015  *   Pointer to the list of items.
3016  * @param[in] actions
3017  *   Pointer to the list of actions.
3018  * @param[in] flow_idx
 *   Memory pool index of the flow.
3020  * @param[out] error
3021  *   Pointer to the error structure.
3022  *
3023  * @return
3024  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3025  */
3026 static inline struct mlx5_flow *
3027 flow_drv_prepare(struct rte_eth_dev *dev,
3028                  const struct rte_flow *flow,
3029                  const struct rte_flow_attr *attr,
3030                  const struct rte_flow_item items[],
3031                  const struct rte_flow_action actions[],
3032                  uint32_t flow_idx,
3033                  struct rte_flow_error *error)
3034 {
3035         const struct mlx5_flow_driver_ops *fops;
3036         enum mlx5_flow_drv_type type = flow->drv_type;
3037         struct mlx5_flow *mlx5_flow = NULL;
3038
3039         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3040         fops = flow_get_drv_ops(type);
3041         mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3042         if (mlx5_flow)
3043                 mlx5_flow->flow_idx = flow_idx;
3044         return mlx5_flow;
3045 }
3046
3047 /**
3048  * Flow driver translation API. This abstracts calling driver specific
3049  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3050  * translates a generic flow into a driver flow. flow_drv_prepare() must
3051  * precede.
3052  *
3053  * @note
3054  *   dev_flow->layers could be filled as a result of parsing during translation
3055  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3056  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3057  *   flow->actions could be overwritten even though all the expanded dev_flows
3058  *   have the same actions.
3059  *
3060  * @param[in] dev
3061  *   Pointer to the rte dev structure.
3062  * @param[in, out] dev_flow
3063  *   Pointer to the mlx5 flow.
3064  * @param[in] attr
3065  *   Pointer to the flow attributes.
3066  * @param[in] items
3067  *   Pointer to the list of items.
3068  * @param[in] actions
3069  *   Pointer to the list of actions.
3070  * @param[out] error
3071  *   Pointer to the error structure.
3072  *
3073  * @return
3074  *   0 on success, a negative errno value otherwise and rte_errno is set.
3075  */
3076 static inline int
3077 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3078                    const struct rte_flow_attr *attr,
3079                    const struct rte_flow_item items[],
3080                    const struct rte_flow_action actions[],
3081                    struct rte_flow_error *error)
3082 {
3083         const struct mlx5_flow_driver_ops *fops;
3084         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3085
3086         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3087         fops = flow_get_drv_ops(type);
3088         return fops->translate(dev, dev_flow, attr, items, actions, error);
3089 }
3090
3091 /**
3092  * Flow driver apply API. This abstracts calling driver specific functions.
3093  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3094  * translated driver flows on to device. flow_drv_translate() must precede.
3095  *
3096  * @param[in] dev
3097  *   Pointer to Ethernet device structure.
3098  * @param[in, out] flow
3099  *   Pointer to flow structure.
3100  * @param[out] error
3101  *   Pointer to error structure.
3102  *
3103  * @return
3104  *   0 on success, a negative errno value otherwise and rte_errno is set.
3105  */
3106 static inline int
3107 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3108                struct rte_flow_error *error)
3109 {
3110         const struct mlx5_flow_driver_ops *fops;
3111         enum mlx5_flow_drv_type type = flow->drv_type;
3112
3113         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3114         fops = flow_get_drv_ops(type);
3115         return fops->apply(dev, flow, error);
3116 }
3117
3118 /**
3119  * Flow driver destroy API. This abstracts calling driver specific functions.
3120  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3121  * on device and releases resources of the flow.
3122  *
3123  * @param[in] dev
3124  *   Pointer to Ethernet device.
3125  * @param[in, out] flow
3126  *   Pointer to flow structure.
3127  */
3128 static inline void
3129 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3130 {
3131         const struct mlx5_flow_driver_ops *fops;
3132         enum mlx5_flow_drv_type type = flow->drv_type;
3133
3134         flow_mreg_split_qrss_release(dev, flow);
3135         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3136         fops = flow_get_drv_ops(type);
3137         fops->destroy(dev, flow);
3138 }
3139
3140 /**
3141  * Get RSS action from the action list.
3142  *
3143  * @param[in] actions
3144  *   Pointer to the list of actions.
3145  *
3146  * @return
 *   Pointer to the RSS action if it exists, NULL otherwise.
3148  */
3149 static const struct rte_flow_action_rss*
3150 flow_get_rss_action(const struct rte_flow_action actions[])
3151 {
3152         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3153                 switch (actions->type) {
3154                 case RTE_FLOW_ACTION_TYPE_RSS:
3155                         return (const struct rte_flow_action_rss *)
3156                                actions->conf;
3157                 default:
3158                         break;
3159                 }
3160         }
3161         return NULL;
3162 }
3163
3164 /**
3165  * Get ASO age action by index.
3166  *
3167  * @param[in] dev
3168  *   Pointer to the Ethernet device structure.
3169  * @param[in] age_idx
3170  *   Index to the ASO age action.
3171  *
3172  * @return
3173  *   The specified ASO age action.
3174  */
3175 struct mlx5_aso_age_action*
3176 flow_aso_age_get_by_idx(struct rte_eth_dev *dev, uint32_t age_idx)
3177 {
3178         uint16_t pool_idx = age_idx & UINT16_MAX;
3179         uint16_t offset = (age_idx >> 16) & UINT16_MAX;
3180         struct mlx5_priv *priv = dev->data->dev_private;
3181         struct mlx5_aso_age_mng *mng = priv->sh->aso_age_mng;
3182         struct mlx5_aso_age_pool *pool = mng->pools[pool_idx];
3183
3184         return &pool->actions[offset - 1];
3185 }
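/*
 * Illustrative example (not part of the driver): age_idx packs the pool
 * index in its lower 16 bits and a 1-based action offset in its upper 16
 * bits. A hypothetical value resolves as follows:
 *
 * @code
 * uint32_t age_idx = 0x00030002; // Offset 3 in pool 2.
 * // pool_idx = age_idx & UINT16_MAX;          -> 2
 * // offset   = (age_idx >> 16) & UINT16_MAX;  -> 3
 * // returned action: &mng->pools[2]->actions[2]
 * @endcode
 */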
3186
/* Maps a shared action to its translated non-shared action in an actions array. */
3188 struct mlx5_translated_shared_action {
3189         struct rte_flow_shared_action *action; /**< Shared action */
3190         int index; /**< Index in related array of rte_flow_action */
3191 };
3192
3193 /**
 * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to the related
 * non-shared action if translation is possible.
 * This functionality is used to run the same execution path for both shared
 * and non-shared actions on flow create. All necessary preparations for
 * shared action handling should be performed on the *shared* actions list
 * returned from this call.
3200  *
3201  * @param[in] dev
3202  *   Pointer to Ethernet device.
3203  * @param[in] actions
3204  *   List of actions to translate.
3205  * @param[out] shared
3206  *   List to store translated shared actions.
3207  * @param[in, out] shared_n
 *   Size of the *shared* array. On return it is updated with the number of
 *   shared actions retrieved from the *actions* list.
3210  * @param[out] translated_actions
3211  *   List of actions where all shared actions were translated to non shared
3212  *   if possible. NULL if no translation took place.
3213  * @param[out] error
3214  *   Pointer to the error structure.
3215  *
3216  * @return
3217  *   0 on success, a negative errno value otherwise and rte_errno is set.
3218  */
3219 static int
3220 flow_shared_actions_translate(struct rte_eth_dev *dev,
3221                               const struct rte_flow_action actions[],
3222                               struct mlx5_translated_shared_action *shared,
3223                               int *shared_n,
3224                               struct rte_flow_action **translated_actions,
3225                               struct rte_flow_error *error)
3226 {
3227         struct mlx5_priv *priv = dev->data->dev_private;
3228         struct rte_flow_action *translated = NULL;
3229         size_t actions_size;
3230         int n;
3231         int copied_n = 0;
3232         struct mlx5_translated_shared_action *shared_end = NULL;
3233
3234         for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3235                 if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
3236                         continue;
3237                 if (copied_n == *shared_n) {
3238                         return rte_flow_error_set
3239                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3240                                  NULL, "too many shared actions");
3241                 }
3242                 rte_memcpy(&shared[copied_n].action, &actions[n].conf,
3243                            sizeof(actions[n].conf));
3244                 shared[copied_n].index = n;
3245                 copied_n++;
3246         }
3247         n++;
3248         *shared_n = copied_n;
3249         if (!copied_n)
3250                 return 0;
3251         actions_size = sizeof(struct rte_flow_action) * n;
3252         translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3253         if (!translated) {
3254                 rte_errno = ENOMEM;
3255                 return -ENOMEM;
3256         }
3257         memcpy(translated, actions, actions_size);
3258         for (shared_end = shared + copied_n; shared < shared_end; shared++) {
3259                 struct mlx5_shared_action_rss *shared_rss;
3260                 uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
3261                 uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
3262                 uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET)
3263                                                                            - 1);
3264
3265                 switch (type) {
3266                 case MLX5_SHARED_ACTION_TYPE_RSS:
3267                         shared_rss = mlx5_ipool_get
3268                           (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS], idx);
3269                         translated[shared->index].type =
3270                                 RTE_FLOW_ACTION_TYPE_RSS;
3271                         translated[shared->index].conf =
3272                                 &shared_rss->origin;
3273                         break;
3274                 case MLX5_SHARED_ACTION_TYPE_AGE:
3275                         if (priv->sh->flow_hit_aso_en) {
3276                                 translated[shared->index].type =
3277                                         (enum rte_flow_action_type)
3278                                         MLX5_RTE_FLOW_ACTION_TYPE_AGE;
3279                                 translated[shared->index].conf =
3280                                                          (void *)(uintptr_t)idx;
3281                                 break;
3282                         }
3283                         /* Fall-through */
3284                 default:
3285                         mlx5_free(translated);
3286                         return rte_flow_error_set
3287                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3288                                  NULL, "invalid shared action type");
3289                 }
3290         }
3291         *translated_actions = translated;
3292         return 0;
3293 }
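/*
 * Illustrative example (not part of the driver): the shared action handle
 * is an encoded integer, with the action type stored above
 * MLX5_SHARED_ACTION_TYPE_OFFSET and the pool index in the bits below it,
 * exactly as decoded in the function above:
 *
 * @code
 * uint32_t act_idx = (uint32_t)(uintptr_t)shared_action_handle;
 * uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
 * uint32_t idx = act_idx & ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
 * // type == MLX5_SHARED_ACTION_TYPE_RSS selects the RSS ipool entry idx.
 * @endcode
 */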
3294
3295 /**
3296  * Get Shared RSS action from the action list.
3297  *
3298  * @param[in] dev
3299  *   Pointer to Ethernet device.
3300  * @param[in] shared
3301  *   Pointer to the list of actions.
3302  * @param[in] shared_n
3303  *   Actions list length.
3304  *
3305  * @return
 *   The MLX5 RSS action ID if it exists, 0 otherwise.
3307  */
3308 static uint32_t
3309 flow_get_shared_rss_action(struct rte_eth_dev *dev,
3310                            struct mlx5_translated_shared_action *shared,
3311                            int shared_n)
3312 {
3313         struct mlx5_translated_shared_action *shared_end;
3314         struct mlx5_priv *priv = dev->data->dev_private;
3315         struct mlx5_shared_action_rss *shared_rss;
3316
3318         for (shared_end = shared + shared_n; shared < shared_end; shared++) {
3319                 uint32_t act_idx = (uint32_t)(uintptr_t)shared->action;
3320                 uint32_t type = act_idx >> MLX5_SHARED_ACTION_TYPE_OFFSET;
3321                 uint32_t idx = act_idx &
3322                                    ((1u << MLX5_SHARED_ACTION_TYPE_OFFSET) - 1);
3323                 switch (type) {
3324                 case MLX5_SHARED_ACTION_TYPE_RSS:
3325                         shared_rss = mlx5_ipool_get
3326                                 (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
3327                                                                            idx);
3328                         __atomic_add_fetch(&shared_rss->refcnt, 1,
3329                                            __ATOMIC_RELAXED);
3330                         return idx;
3331                 default:
3332                         break;
3333                 }
3334         }
3335         return 0;
3336 }
3337
3338 static unsigned int
3339 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3340 {
3341         const struct rte_flow_item *item;
3342         unsigned int has_vlan = 0;
3343
3344         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3345                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3346                         has_vlan = 1;
3347                         break;
3348                 }
3349         }
3350         if (has_vlan)
3351                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3352                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3353         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3354                                MLX5_EXPANSION_ROOT_OUTER;
3355 }
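/*
 * Illustrative example (not part of the driver): a pattern containing a
 * VLAN item with RSS level 1 (outer) selects the ETH_VLAN expansion root,
 * while the same pattern with level 2 (inner RSS) selects the
 * OUTER_ETH_VLAN root so the expansion descends into the tunnel.
 *
 * @code
 * // find_graph_root(vlan_pattern, 1) == MLX5_EXPANSION_ROOT_ETH_VLAN
 * // find_graph_root(vlan_pattern, 2) == MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN
 * @endcode
 */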
3356
3357 /**
3358  *  Get layer flags from the prefix flow.
3359  *
 *  Some flows may be split into several subflows: the prefix subflow gets the
 *  match items and the suffix subflow gets the actions.
 *  Some actions need the user-defined match item flags to get the details for
 *  the action.
 *  This function helps the suffix flow to get the item layer flags from the
 *  prefix subflow.
3366  *
3367  * @param[in] dev_flow
 *   Pointer to the created prefix subflow.
3369  *
3370  * @return
 *   The layers obtained from the prefix subflow.
3372  */
3373 static inline uint64_t
3374 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3375 {
3376         uint64_t layers = 0;
3377
        /*
         * The layer bits could be cached in local variables, but usually the
         * compiler will do that optimization on its own.
         * If there is no decap action, use the layers directly.
         */
3383         if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3384                 return dev_flow->handle->layers;
3385         /* Convert L3 layers with decap action. */
3386         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3387                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3388         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3389                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3390         /* Convert L4 layers with decap action.  */
3391         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3392                 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3393         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3394                 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3395         return layers;
3396 }
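/*
 * Illustrative example (not part of the driver): if the prefix subflow
 * carries a decap action and matched inner IPv4/UDP, the suffix subflow
 * sees those layers as outer ones, because the packet is already
 * decapsulated when it reaches the suffix:
 *
 * @code
 * // handle->layers = INNER_L3_IPV4 | INNER_L4_UDP, act_flags has DECAP
 * // => returned layers = OUTER_L3_IPV4 | OUTER_L4_UDP
 * @endcode
 */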
3397
3398 /**
3399  * Get metadata split action information.
3400  *
3401  * @param[in] actions
3402  *   Pointer to the list of actions.
 * @param[out] qrss
 *   Pointer to the return pointer, set to the QUEUE/RSS action when one is
 *   found in the list; it is left untouched if no QUEUE/RSS is found.
3408  * @param[out] encap_idx
 *   Pointer to the index of the encap action if it exists, otherwise the last
 *   action index.
3411  *
3412  * @return
3413  *   Total number of actions.
3414  */
3415 static int
3416 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3417                                        const struct rte_flow_action **qrss,
3418                                        int *encap_idx)
3419 {
3420         const struct rte_flow_action_raw_encap *raw_encap;
3421         int actions_n = 0;
3422         int raw_decap_idx = -1;
3423
3424         *encap_idx = -1;
3425         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3426                 switch (actions->type) {
3427                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3428                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3429                         *encap_idx = actions_n;
3430                         break;
3431                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3432                         raw_decap_idx = actions_n;
3433                         break;
3434                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3435                         raw_encap = actions->conf;
3436                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3437                                 *encap_idx = raw_decap_idx != -1 ?
3438                                                       raw_decap_idx : actions_n;
3439                         break;
3440                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3441                 case RTE_FLOW_ACTION_TYPE_RSS:
3442                         *qrss = actions;
3443                         break;
3444                 default:
3445                         break;
3446                 }
3447                 actions_n++;
3448         }
3449         if (*encap_idx == -1)
3450                 *encap_idx = actions_n;
3451         /* Count RTE_FLOW_ACTION_TYPE_END. */
3452         return actions_n + 1;
3453 }
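/*
 * Illustrative example (not part of the driver): for the hypothetical
 * action list RAW_DECAP / RAW_ENCAP(size > decision size) / QUEUE / END,
 * the parser above reports *qrss at the QUEUE action, *encap_idx = 0 (the
 * RAW_DECAP position, since the decap precedes the big encap) and returns
 * 4, the total action count including END.
 */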
3454
3455 /**
3456  * Check meter action from the action list.
3457  *
3458  * @param[in] actions
3459  *   Pointer to the list of actions.
3460  * @param[out] mtr
3461  *   Pointer to the meter exist flag.
3462  *
3463  * @return
3464  *   Total number of actions.
3465  */
3466 static int
3467 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
3468 {
3469         int actions_n = 0;
3470
3471         MLX5_ASSERT(mtr);
3472         *mtr = 0;
3473         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3474                 switch (actions->type) {
3475                 case RTE_FLOW_ACTION_TYPE_METER:
3476                         *mtr = 1;
3477                         break;
3478                 default:
3479                         break;
3480                 }
3481                 actions_n++;
3482         }
3483         /* Count RTE_FLOW_ACTION_TYPE_END. */
3484         return actions_n + 1;
3485 }
3486
3487 /**
3488  * Check if the flow should be split due to hairpin.
3489  * The reason for the split is that in current HW we can't
3490  * support encap and push-vlan on Rx, so if a flow contains
3491  * these actions we move it to Tx.
3492  *
3493  * @param dev
3494  *   Pointer to Ethernet device.
3495  * @param[in] attr
3496  *   Flow rule attributes.
3497  * @param[in] actions
3498  *   Associated actions (list terminated by the END action).
3499  *
3500  * @return
3501  *   > 0 the number of actions and the flow should be split,
3502  *   0 when no split required.
3503  */
3504 static int
3505 flow_check_hairpin_split(struct rte_eth_dev *dev,
3506                          const struct rte_flow_attr *attr,
3507                          const struct rte_flow_action actions[])
3508 {
3509         int queue_action = 0;
3510         int action_n = 0;
3511         int split = 0;
3512         const struct rte_flow_action_queue *queue;
3513         const struct rte_flow_action_rss *rss;
3514         const struct rte_flow_action_raw_encap *raw_encap;
3515         const struct rte_eth_hairpin_conf *conf;
3516
3517         if (!attr->ingress)
3518                 return 0;
3519         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3520                 switch (actions->type) {
3521                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3522                         queue = actions->conf;
3523                         if (queue == NULL)
3524                                 return 0;
3525                         conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3526                         if (conf != NULL && !!conf->tx_explicit)
3527                                 return 0;
3528                         queue_action = 1;
3529                         action_n++;
3530                         break;
3531                 case RTE_FLOW_ACTION_TYPE_RSS:
3532                         rss = actions->conf;
3533                         if (rss == NULL || rss->queue_num == 0)
3534                                 return 0;
3535                         conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3536                         if (conf != NULL && !!conf->tx_explicit)
3537                                 return 0;
3538                         queue_action = 1;
3539                         action_n++;
3540                         break;
3541                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3542                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3543                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3544                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3545                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3546                         split++;
3547                         action_n++;
3548                         break;
3549                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3550                         raw_encap = actions->conf;
3551                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3552                                 split++;
3553                         action_n++;
3554                         break;
3555                 default:
3556                         action_n++;
3557                         break;
3558                 }
3559         }
3560         if (split && queue_action)
3561                 return action_n;
3562         return 0;
3563 }
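/*
 * Illustrative example (not part of the driver): an ingress flow with the
 * hypothetical actions VXLAN_ENCAP / QUEUE / END, where the queue is a
 * hairpin queue with implicit Tx rules, returns 2 (the action count), so
 * the flow is split. With an explicit-Tx hairpin configuration, or with
 * no encap/push-vlan action at all, it returns 0 and no split is done.
 */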
3564
3565 /* Declare flow create/destroy prototype in advance. */
3566 static uint32_t
3567 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3568                  const struct rte_flow_attr *attr,
3569                  const struct rte_flow_item items[],
3570                  const struct rte_flow_action actions[],
3571                  bool external, struct rte_flow_error *error);
3572
3573 static void
3574 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3575                   uint32_t flow_idx);
3576
3577 int
3578 flow_dv_mreg_match_cb(struct mlx5_hlist *list __rte_unused,
3579                       struct mlx5_hlist_entry *entry,
3580                       uint64_t key, void *cb_ctx __rte_unused)
3581 {
3582         struct mlx5_flow_mreg_copy_resource *mcp_res =
3583                 container_of(entry, typeof(*mcp_res), hlist_ent);
3584
3585         return mcp_res->mark_id != key;
3586 }
3587
3588 struct mlx5_hlist_entry *
3589 flow_dv_mreg_create_cb(struct mlx5_hlist *list, uint64_t key,
3590                        void *cb_ctx)
3591 {
3592         struct rte_eth_dev *dev = list->ctx;
3593         struct mlx5_priv *priv = dev->data->dev_private;
3594         struct mlx5_flow_cb_ctx *ctx = cb_ctx;
3595         struct mlx5_flow_mreg_copy_resource *mcp_res;
3596         struct rte_flow_error *error = ctx->error;
3597         uint32_t idx = 0;
3598         int ret;
3599         uint32_t mark_id = key;
3600         struct rte_flow_attr attr = {
3601                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3602                 .ingress = 1,
3603         };
3604         struct mlx5_rte_flow_item_tag tag_spec = {
3605                 .data = mark_id,
3606         };
3607         struct rte_flow_item items[] = {
3608                 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3609         };
3610         struct rte_flow_action_mark ftag = {
3611                 .id = mark_id,
3612         };
3613         struct mlx5_flow_action_copy_mreg cp_mreg = {
3614                 .dst = REG_B,
3615                 .src = REG_NON,
3616         };
3617         struct rte_flow_action_jump jump = {
3618                 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3619         };
3620         struct rte_flow_action actions[] = {
3621                 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3622         };
3623
        /* Fill the register fields in the flow. */
3625         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3626         if (ret < 0)
3627                 return NULL;
3628         tag_spec.id = ret;
3629         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3630         if (ret < 0)
3631                 return NULL;
3632         cp_mreg.src = ret;
3633         /* Provide the full width of FLAG specific value. */
3634         if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3635                 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3636         /* Build a new flow. */
3637         if (mark_id != MLX5_DEFAULT_COPY_ID) {
3638                 items[0] = (struct rte_flow_item){
3639                         .type = (enum rte_flow_item_type)
3640                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3641                         .spec = &tag_spec,
3642                 };
3643                 items[1] = (struct rte_flow_item){
3644                         .type = RTE_FLOW_ITEM_TYPE_END,
3645                 };
3646                 actions[0] = (struct rte_flow_action){
3647                         .type = (enum rte_flow_action_type)
3648                                 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3649                         .conf = &ftag,
3650                 };
3651                 actions[1] = (struct rte_flow_action){
3652                         .type = (enum rte_flow_action_type)
3653                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3654                         .conf = &cp_mreg,
3655                 };
3656                 actions[2] = (struct rte_flow_action){
3657                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3658                         .conf = &jump,
3659                 };
3660                 actions[3] = (struct rte_flow_action){
3661                         .type = RTE_FLOW_ACTION_TYPE_END,
3662                 };
3663         } else {
3664                 /* Default rule, wildcard match. */
3665                 attr.priority = MLX5_FLOW_PRIO_RSVD;
3666                 items[0] = (struct rte_flow_item){
3667                         .type = RTE_FLOW_ITEM_TYPE_END,
3668                 };
3669                 actions[0] = (struct rte_flow_action){
3670                         .type = (enum rte_flow_action_type)
3671                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3672                         .conf = &cp_mreg,
3673                 };
3674                 actions[1] = (struct rte_flow_action){
3675                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3676                         .conf = &jump,
3677                 };
3678                 actions[2] = (struct rte_flow_action){
3679                         .type = RTE_FLOW_ACTION_TYPE_END,
3680                 };
3681         }
3682         /* Build a new entry. */
3683         mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3684         if (!mcp_res) {
3685                 rte_errno = ENOMEM;
3686                 return NULL;
3687         }
3688         mcp_res->idx = idx;
3689         mcp_res->mark_id = mark_id;
        /*
         * The copy flows are not included in any list. These
         * ones are referenced from other flows and cannot
         * be applied, removed, or deleted in arbitrary order
         * by list traversing.
         */
3696         mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3697                                          actions, false, error);
3698         if (!mcp_res->rix_flow) {
3699                 mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], idx);
3700                 return NULL;
3701         }
3702         return &mcp_res->hlist_ent;
3703 }
3704
3705 /**
3706  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3707  *
3708  * As mark_id is unique, if there's already a registered flow for the mark_id,
3709  * return by increasing the reference counter of the resource. Otherwise, create
3710  * the resource (mcp_res) and flow.
3711  *
3712  * Flow looks like,
3713  *   - If ingress port is ANY and reg_c[1] is mark_id,
3714  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3715  *
3716  * For default flow (zero mark_id), flow is like,
3717  *   - If ingress port is ANY,
3718  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
3719  *
3720  * @param dev
3721  *   Pointer to Ethernet device.
3722  * @param mark_id
3723  *   ID of MARK action, zero means default flow for META.
3724  * @param[out] error
3725  *   Perform verbose error reporting if not NULL.
3726  *
3727  * @return
3728  *   Associated resource on success, NULL otherwise and rte_errno is set.
3729  */
3730 static struct mlx5_flow_mreg_copy_resource *
3731 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3732                           struct rte_flow_error *error)
3733 {
3734         struct mlx5_priv *priv = dev->data->dev_private;
3735         struct mlx5_hlist_entry *entry;
3736         struct mlx5_flow_cb_ctx ctx = {
3737                 .dev = dev,
3738                 .error = error,
3739         };
3740
3741         /* Check if already registered. */
3742         MLX5_ASSERT(priv->mreg_cp_tbl);
3743         entry = mlx5_hlist_register(priv->mreg_cp_tbl, mark_id, &ctx);
3744         if (!entry)
3745                 return NULL;
3746         return container_of(entry, struct mlx5_flow_mreg_copy_resource,
3747                             hlist_ent);
3748 }
3749
3750 void
3751 flow_dv_mreg_remove_cb(struct mlx5_hlist *list, struct mlx5_hlist_entry *entry)
3752 {
3753         struct mlx5_flow_mreg_copy_resource *mcp_res =
3754                 container_of(entry, typeof(*mcp_res), hlist_ent);
3755         struct rte_eth_dev *dev = list->ctx;
3756         struct mlx5_priv *priv = dev->data->dev_private;
3757
3758         MLX5_ASSERT(mcp_res->rix_flow);
3759         flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3760         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3761 }
3762
3763 /**
3764  * Release flow in RX_CP_TBL.
3765  *
3766  * @param dev
3767  *   Pointer to Ethernet device.
3768  * @param flow
3769  *   Parent flow for which copying is provided.
3770  */
3771 static void
3772 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3773                           struct rte_flow *flow)
3774 {
3775         struct mlx5_flow_mreg_copy_resource *mcp_res;
3776         struct mlx5_priv *priv = dev->data->dev_private;
3777
3778         if (!flow->rix_mreg_copy)
3779                 return;
3780         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3781                                  flow->rix_mreg_copy);
3782         if (!mcp_res || !priv->mreg_cp_tbl)
3783                 return;
3784         MLX5_ASSERT(mcp_res->rix_flow);
3785         mlx5_hlist_unregister(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3786         flow->rix_mreg_copy = 0;
3787 }
3788
3789 /**
3790  * Remove the default copy action from RX_CP_TBL.
3791  *
3792  * This function is called from mlx5_dev_start(). Thread safety
3793  * is not guaranteed.
3794  *
3795  * @param dev
3796  *   Pointer to Ethernet device.
3797  */
3798 static void
3799 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3800 {
3801         struct mlx5_hlist_entry *entry;
3802         struct mlx5_priv *priv = dev->data->dev_private;
3803
3804         /* Check if default flow is registered. */
3805         if (!priv->mreg_cp_tbl)
3806                 return;
3807         entry = mlx5_hlist_lookup(priv->mreg_cp_tbl,
3808                                   MLX5_DEFAULT_COPY_ID, NULL);
3809         if (!entry)
3810                 return;
3811         mlx5_hlist_unregister(priv->mreg_cp_tbl, entry);
3812 }
3813
3814 /**
3815  * Add the default copy action in RX_CP_TBL.
3816  *
3817  * This function is called from mlx5_dev_start(). Thread safety
3818  * is not guaranteed.
3819  *
3820  * @param dev
3821  *   Pointer to Ethernet device.
3822  * @param[out] error
3823  *   Perform verbose error reporting if not NULL.
3824  *
3825  * @return
3826  *   0 for success, negative value otherwise and rte_errno is set.
3827  */
3828 static int
3829 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3830                                   struct rte_flow_error *error)
3831 {
3832         struct mlx5_priv *priv = dev->data->dev_private;
3833         struct mlx5_flow_mreg_copy_resource *mcp_res;
3834
3835         /* Check whether extensive metadata feature is engaged. */
3836         if (!priv->config.dv_flow_en ||
3837             priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3838             !mlx5_flow_ext_mreg_supported(dev) ||
3839             !priv->sh->dv_regc0_mask)
3840                 return 0;
3841         /*
3842          * Adding the default mreg copy flow may be attempted multiple
3843          * times during start, so avoid registering it twice.
3844          */
3845         if (mlx5_hlist_lookup(priv->mreg_cp_tbl, MLX5_DEFAULT_COPY_ID, NULL))
3846                 return 0;
3847         mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3848         if (!mcp_res)
3849                 return -rte_errno;
3850         return 0;
3851 }
3852
3853 /**
3854  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3855  *
3856  * Every flow having a Q/RSS action should be split by
3857  * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in RX_CP_TBL
3858  * performs the following,
3859  *   - CQE->flow_tag := reg_c[1] (MARK)
3860  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3861  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1],
3862  * so there must be a flow for each MARK ID set by the MARK action.
3863  *
3864  * For the aforementioned reason, if there's a MARK action in flow's action
3865  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3866  * the MARK ID to CQE's flow_tag like,
3867  *   - If reg_c[1] is mark_id,
3868  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3869  *
3870  * For SET_META action which stores value in reg_c[0], as the destination is
3871  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3872  * MARK ID means the default flow. The default flow looks like,
3873  *   - For all flows, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3874  *
3875  * @param dev
3876  *   Pointer to Ethernet device.
3877  * @param flow
3878  *   Pointer to flow structure.
3879  * @param[in] actions
3880  *   Pointer to the list of actions.
3881  * @param[out] error
3882  *   Perform verbose error reporting if not NULL.
3883  *
3884  * @return
3885  *   0 on success, negative value otherwise and rte_errno is set.
3886  */
3887 static int
3888 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3889                             struct rte_flow *flow,
3890                             const struct rte_flow_action *actions,
3891                             struct rte_flow_error *error)
3892 {
3893         struct mlx5_priv *priv = dev->data->dev_private;
3894         struct mlx5_dev_config *config = &priv->config;
3895         struct mlx5_flow_mreg_copy_resource *mcp_res;
3896         const struct rte_flow_action_mark *mark;
3897
3898         /* Check whether extensive metadata feature is engaged. */
3899         if (!config->dv_flow_en ||
3900             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3901             !mlx5_flow_ext_mreg_supported(dev) ||
3902             !priv->sh->dv_regc0_mask)
3903                 return 0;
3904         /* Find MARK action. */
3905         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3906                 switch (actions->type) {
3907                 case RTE_FLOW_ACTION_TYPE_FLAG:
3908                         mcp_res = flow_mreg_add_copy_action
3909                                 (dev, MLX5_FLOW_MARK_DEFAULT, error);
3910                         if (!mcp_res)
3911                                 return -rte_errno;
3912                         flow->rix_mreg_copy = mcp_res->idx;
3913                         return 0;
3914                 case RTE_FLOW_ACTION_TYPE_MARK:
3915                         mark = (const struct rte_flow_action_mark *)
3916                                 actions->conf;
3917                         mcp_res =
3918                                 flow_mreg_add_copy_action(dev, mark->id, error);
3919                         if (!mcp_res)
3920                                 return -rte_errno;
3921                         flow->rix_mreg_copy = mcp_res->idx;
3922                         return 0;
3923                 default:
3924                         break;
3925                 }
3926         }
3927         return 0;
3928 }
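
/*
 * Illustrative sketch, not part of the driver: an application action
 * list that would make flow_mreg_update_copy_table() register a copy
 * flow for mark ID 0x1234. All names and values here are local to
 * this example.
 */
static __rte_unused const struct rte_flow_action_mark example_mark = {
	.id = 0x1234,
};
static __rte_unused const struct rte_flow_action_queue example_queue = {
	.index = 0,
};
static __rte_unused const struct rte_flow_action example_mark_actions[] = {
	{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
	{ .type = RTE_FLOW_ACTION_TYPE_END },
};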
3929
3930 #define MLX5_MAX_SPLIT_ACTIONS 24
3931 #define MLX5_MAX_SPLIT_ITEMS 24
3932
3933 /**
3934  * Split the hairpin flow.
3935  * Since HW can't support encap and push-vlan on Rx, we move these
3936  * actions to Tx.
3937  * If the count action comes after the encap, we also move the
3938  * count action; in this case the count will also measure
3939  * the outer bytes.
3940  *
3941  * @param dev
3942  *   Pointer to Ethernet device.
3943  * @param[in] actions
3944  *   Associated actions (list terminated by the END action).
3945  * @param[out] actions_rx
3946  *   Rx flow actions.
3947  * @param[out] actions_tx
3948  *   Tx flow actions.
3949  * @param[out] pattern_tx
3950  *   The pattern items for the Tx flow.
3951  * @param[in] flow_id
3952  *   The flow ID connected to this flow.
3953  *
3954  * @return
3955  *   0 on success.
3956  */
3957 static int
3958 flow_hairpin_split(struct rte_eth_dev *dev,
3959                    const struct rte_flow_action actions[],
3960                    struct rte_flow_action actions_rx[],
3961                    struct rte_flow_action actions_tx[],
3962                    struct rte_flow_item pattern_tx[],
3963                    uint32_t flow_id)
3964 {
3965         const struct rte_flow_action_raw_encap *raw_encap;
3966         const struct rte_flow_action_raw_decap *raw_decap;
3967         struct mlx5_rte_flow_action_set_tag *set_tag;
3968         struct rte_flow_action *tag_action;
3969         struct mlx5_rte_flow_item_tag *tag_item;
3970         struct rte_flow_item *item;
3971         char *addr;
3972         int encap = 0;
3973
3974         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3975                 switch (actions->type) {
3976                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3977                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3978                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3979                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3980                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3981                         rte_memcpy(actions_tx, actions,
3982                                sizeof(struct rte_flow_action));
3983                         actions_tx++;
3984                         break;
3985                 case RTE_FLOW_ACTION_TYPE_COUNT:
3986                         if (encap) {
3987                                 rte_memcpy(actions_tx, actions,
3988                                            sizeof(struct rte_flow_action));
3989                                 actions_tx++;
3990                         } else {
3991                                 rte_memcpy(actions_rx, actions,
3992                                            sizeof(struct rte_flow_action));
3993                                 actions_rx++;
3994                         }
3995                         break;
3996                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3997                         raw_encap = actions->conf;
3998                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE) {
3999                                 memcpy(actions_tx, actions,
4000                                        sizeof(struct rte_flow_action));
4001                                 actions_tx++;
4002                                 encap = 1;
4003                         } else {
4004                                 rte_memcpy(actions_rx, actions,
4005                                            sizeof(struct rte_flow_action));
4006                                 actions_rx++;
4007                         }
4008                         break;
4009                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4010                         raw_decap = actions->conf;
4011                         if (raw_decap->size < MLX5_ENCAPSULATION_DECISION_SIZE) {
4012                                 memcpy(actions_tx, actions,
4013                                        sizeof(struct rte_flow_action));
4014                                 actions_tx++;
4015                         } else {
4016                                 rte_memcpy(actions_rx, actions,
4017                                            sizeof(struct rte_flow_action));
4018                                 actions_rx++;
4019                         }
4020                         break;
4021                 default:
4022                         rte_memcpy(actions_rx, actions,
4023                                    sizeof(struct rte_flow_action));
4024                         actions_rx++;
4025                         break;
4026                 }
4027         }
4028         /* Add the set tag action and end action for the Rx flow. */
4029         tag_action = actions_rx;
4030         tag_action->type = (enum rte_flow_action_type)
4031                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4032         actions_rx++;
4033         rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4034         actions_rx++;
4035         set_tag = (void *)actions_rx;
4036         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
4037         MLX5_ASSERT(set_tag->id > REG_NON);
4038         set_tag->data = flow_id;
4039         tag_action->conf = set_tag;
4040         /* Create Tx item list. */
4041         rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4042         addr = (void *)&pattern_tx[2];
4043         item = pattern_tx;
4044         item->type = (enum rte_flow_item_type)
4045                      MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4046         tag_item = (void *)addr;
4047         tag_item->data = flow_id;
4048         tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4049         MLX5_ASSERT(tag_item->id > REG_NON);
4050         item->spec = tag_item;
4051         addr += sizeof(struct mlx5_rte_flow_item_tag);
4052         tag_item = (void *)addr;
4053         tag_item->data = UINT32_MAX;
4054         tag_item->id = UINT16_MAX;
4055         item->mask = tag_item;
4056         item->last = NULL;
4057         item++;
4058         item->type = RTE_FLOW_ITEM_TYPE_END;
4059         return 0;
4060 }
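
/*
 * Illustrative sketch, not part of the driver: the Tx pattern buffer
 * layout consumed by flow_hairpin_split() above. The first two
 * rte_flow_item slots hold the TAG and END items, and the tag spec
 * and mask payloads are written to the raw space starting at
 * &pattern_tx[2]. The sizing below is an assumption shown only to
 * make the required allocation explicit.
 */
static __rte_unused void
example_hairpin_tx_pattern_layout(void)
{
	uint8_t buf[sizeof(struct rte_flow_item) * 2 +
		    sizeof(struct mlx5_rte_flow_item_tag) * 2];

	/* flow_hairpin_split() would fill this buffer via pattern_tx. */
	memset(buf, 0, sizeof(buf));
	RTE_SET_USED(buf);
}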
4061
4062 /**
4063  * The last stage of splitting chain, just creates the subflow
4064  * without any modification.
4065  *
4066  * @param[in] dev
4067  *   Pointer to Ethernet device.
4068  * @param[in] flow
4069  *   Parent flow structure pointer.
4070  * @param[in, out] sub_flow
4071  *   Pointer to return the created subflow, may be NULL.
4072  * @param[in] attr
4073  *   Flow rule attributes.
4074  * @param[in] items
4075  *   Pattern specification (list terminated by the END pattern item).
4076  * @param[in] actions
4077  *   Associated actions (list terminated by the END action).
4078  * @param[in] flow_split_info
4079  *   Pointer to flow split info structure.
4080  * @param[out] error
4081  *   Perform verbose error reporting if not NULL.
4082  * @return
4083  *   0 on success, negative value otherwise
4084  */
4085 static int
4086 flow_create_split_inner(struct rte_eth_dev *dev,
4087                         struct rte_flow *flow,
4088                         struct mlx5_flow **sub_flow,
4089                         const struct rte_flow_attr *attr,
4090                         const struct rte_flow_item items[],
4091                         const struct rte_flow_action actions[],
4092                         struct mlx5_flow_split_info *flow_split_info,
4093                         struct rte_flow_error *error)
4094 {
4095         struct mlx5_flow *dev_flow;
4096
4097         dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4098                                     flow_split_info->flow_idx, error);
4099         if (!dev_flow)
4100                 return -rte_errno;
4101         dev_flow->flow = flow;
4102         dev_flow->external = flow_split_info->external;
4103         dev_flow->skip_scale = flow_split_info->skip_scale;
4104         /* Subflow object was created, we must include it in the list. */
4105         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4106                       dev_flow->handle, next);
4107         /*
4108          * If dev_flow is one of the suffix flows, some actions in the
4109          * suffix flow may need the user-defined item layer flags, and
4110          * the metadata rxq mark flag is passed to the suffix flow as well.
4111          */
4112         if (flow_split_info->prefix_layers)
4113                 dev_flow->handle->layers = flow_split_info->prefix_layers;
4114         if (flow_split_info->prefix_mark)
4115                 dev_flow->handle->mark = 1;
4116         if (sub_flow)
4117                 *sub_flow = dev_flow;
4118         return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4119 }
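
/*
 * Illustrative note, not part of the driver: every sub flow created by
 * flow_create_split_inner() is linked into flow->dev_handles via
 * SILIST_INSERT() above, so flow_drv_destroy() can release all sub
 * flows by walking that single index list, including sub flows left
 * behind by a failed split.
 */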
4120
4121 /**
4122  * Split the meter flow.
4123  *
4124  * As the meter flow is split into sub flows, actions other than
4125  * the meter action only make sense when the meter accepts the
4126  * packet. If the packet is to be dropped, no additional actions
4127  * should be taken.
4128  *
4129  * One special kind of action, which decapsulates the L3 tunnel
4130  * header, is placed in the prefix sub flow, so as not to take
4131  * the L3 tunnel header into account.
4132  *
4133  * @param dev
4134  *   Pointer to Ethernet device.
4135  * @param[in] items
4136  *   Pattern specification (list terminated by the END pattern item).
4137  * @param[out] sfx_items
4138  *   Suffix flow match items (list terminated by the END pattern item).
4139  * @param[in] actions
4140  *   Associated actions (list terminated by the END action).
4141  * @param[out] actions_sfx
4142  *   Suffix flow actions.
4143  * @param[out] actions_pre
4144  *   Prefix flow actions.
4145  *
4146  * The tag spec and mask matched by the suffix flow are stored in
4147  * the sfx_items buffer after the END item, so the caller must
4148  * reserve space for them there.
4149  *
4150  * @return
4151  *   The allocated tag ID on success, 0 otherwise.
4152  */
4153 static int
4154 flow_meter_split_prep(struct rte_eth_dev *dev,
4155                  const struct rte_flow_item items[],
4156                  struct rte_flow_item sfx_items[],
4157                  const struct rte_flow_action actions[],
4158                  struct rte_flow_action actions_sfx[],
4159                  struct rte_flow_action actions_pre[])
4160 {
4161         struct mlx5_priv *priv = dev->data->dev_private;
4162         struct rte_flow_action *tag_action = NULL;
4163         struct rte_flow_item *tag_item;
4164         struct mlx5_rte_flow_action_set_tag *set_tag;
4165         struct rte_flow_error error;
4166         const struct rte_flow_action_raw_encap *raw_encap;
4167         const struct rte_flow_action_raw_decap *raw_decap;
4168         struct mlx5_rte_flow_item_tag *tag_spec;
4169         struct mlx5_rte_flow_item_tag *tag_mask;
4170         uint32_t tag_id = 0;
4171         bool copy_vlan = false;
4172
4173         /* Prepare the actions for prefix and suffix flow. */
4174         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4175                 struct rte_flow_action **action_cur = NULL;
4176
4177                 switch (actions->type) {
4178                 case RTE_FLOW_ACTION_TYPE_METER:
4179                         /* Add the extra tag action first. */
4180                         tag_action = actions_pre;
4181                         tag_action->type = (enum rte_flow_action_type)
4182                                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4183                         actions_pre++;
4184                         action_cur = &actions_pre;
4185                         break;
4186                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4187                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4188                         action_cur = &actions_pre;
4189                         break;
4190                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4191                         raw_encap = actions->conf;
4192                         if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4193                                 action_cur = &actions_pre;
4194                         break;
4195                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4196                         raw_decap = actions->conf;
4197                         if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4198                                 action_cur = &actions_pre;
4199                         break;
4200                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4201                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4202                         copy_vlan = true;
4203                         break;
4204                 default:
4205                         break;
4206                 }
4207                 if (!action_cur)
4208                         action_cur = &actions_sfx;
4209                 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
4210                 (*action_cur)++;
4211         }
4212         /* Add end action to the actions. */
4213         actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4214         actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4215         actions_pre++;
4216         /* Set the tag. */
4217         set_tag = (void *)actions_pre;
4218         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4219         mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4220                           &tag_id);
4221         if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
4222                 DRV_LOG(ERR, "Port %u meter flow id exceeds max limit.",
4223                         dev->data->port_id);
4224                 mlx5_ipool_free(priv->sh->ipool
4225                                 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
4226                 return 0;
4227         } else if (!tag_id) {
4228                 return 0;
4229         }
4230         set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
4231         MLX5_ASSERT(tag_action);
4232         tag_action->conf = set_tag;
4233         /* Prepare the suffix subflow items. */
4234         tag_item = sfx_items++;
4235         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4236                 int item_type = items->type;
4237
4238                 switch (item_type) {
4239                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4240                         memcpy(sfx_items, items, sizeof(*sfx_items));
4241                         sfx_items++;
4242                         break;
4243                 case RTE_FLOW_ITEM_TYPE_VLAN:
4244                         if (copy_vlan) {
4245                                 memcpy(sfx_items, items, sizeof(*sfx_items));
4246                                 /*
4247                                  * Convert to internal match item, it is used
4248                                  * for vlan push and set vid.
4249                                  */
4250                                 sfx_items->type = (enum rte_flow_item_type)
4251                                                   MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4252                                 sfx_items++;
4253                         }
4254                         break;
4255                 default:
4256                         break;
4257                 }
4258         }
4259         sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4260         sfx_items++;
4261         tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4262         tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
4263         tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4264         tag_mask = tag_spec + 1;
4265         tag_mask->data = 0xffffff00;
4266         tag_item->type = (enum rte_flow_item_type)
4267                          MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4268         tag_item->spec = tag_spec;
4269         tag_item->last = NULL;
4270         tag_item->mask = tag_mask;
4271         return tag_id;
4272 }
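
/*
 * Illustrative sketch, not part of the driver: how the meter flow ID
 * and the meter color share one tag register in the split above. The
 * ID is stored shifted by MLX5_MTR_COLOR_BITS and the suffix flow
 * matches it with a mask (0xffffff00) that ignores the color bits.
 */
static __rte_unused uint32_t
example_meter_tag_encoding(uint32_t tag_id)
{
	/* Low MLX5_MTR_COLOR_BITS bits are reserved for the color. */
	uint32_t data = tag_id << MLX5_MTR_COLOR_BITS;
	/* The suffix flow compares only the flow ID bits. */
	return data & 0xffffff00;
}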
4273
4274 /**
4275  * Split action list having QUEUE/RSS for metadata register copy.
4276  *
4277  * Once Q/RSS action is detected in user's action list, the flow action
4278  * should be split in order to copy metadata registers, which will happen in
4279  * RX_CP_TBL like,
4280  *   - CQE->flow_tag := reg_c[1] (MARK)
4281  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4282  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4283  * This is because the last action of each flow must be a terminal action
4284  * (QUEUE, RSS or DROP).
4285  *
4286  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4287  * stored and kept in the mlx5_flow structure per each sub_flow.
4288  *
4289  * The Q/RSS action is replaced with,
4290  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4291  * And the following JUMP action is added at the end,
4292  *   - JUMP, to RX_CP_TBL.
4293  *
4294  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
4295  * flow_create_split_metadata() routine. The flow will look like,
4296  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4297  *
4298  * @param dev
4299  *   Pointer to Ethernet device.
4300  * @param[out] split_actions
4301  *   Pointer to store split actions to jump to CP_TBL.
4302  * @param[in] actions
4303  *   Pointer to the list of original flow actions.
4304  * @param[in] qrss
4305  *   Pointer to the Q/RSS action.
4306  * @param[in] actions_n
4307  *   Number of original actions.
4308  * @param[out] error
4309  *   Perform verbose error reporting if not NULL.
4310  *
4311  * @return
4312  *   non-zero unique flow_id on success, otherwise 0 and
4313  *   error/rte_error are set.
4314  */
4315 static uint32_t
4316 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4317                           struct rte_flow_action *split_actions,
4318                           const struct rte_flow_action *actions,
4319                           const struct rte_flow_action *qrss,
4320                           int actions_n, struct rte_flow_error *error)
4321 {
4322         struct mlx5_priv *priv = dev->data->dev_private;
4323         struct mlx5_rte_flow_action_set_tag *set_tag;
4324         struct rte_flow_action_jump *jump;
4325         const int qrss_idx = qrss - actions;
4326         uint32_t flow_id = 0;
4327         int ret = 0;
4328
4329         /*
4330          * Given actions will be split
4331          * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4332          * - Add jump to mreg CP_TBL.
4333          * As a result, there will be one more action.
4334          */
4335         ++actions_n;
4336         memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4337         set_tag = (void *)(split_actions + actions_n);
4338         /*
4339          * If the tag action is not set to void (meaning we are not the
4340          * meter suffix flow), add the tag action, since the meter suffix
4341          * flow already has the tag added.
4342          */
4343         if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4344                 /*
4345                  * Allocate the new subflow ID. This one is unique within
4346                  * device and not shared with representors. Otherwise,
4347                  * we would have to resolve multi-thread access synch
4348                  * issue. Each flow on the shared device is appended
4349                  * with source vport identifier, so the resulting
4350                  * flows will be unique in the shared (by master and
4351                  * representors) domain even if they have coinciding
4352                  * IDs.
4353                  */
4354                 mlx5_ipool_malloc(priv->sh->ipool
4355                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4356                 if (!flow_id)
4357                         return rte_flow_error_set(error, ENOMEM,
4358                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4359                                                   NULL, "can't allocate id "
4360                                                   "for split Q/RSS subflow");
4361                 /* Internal SET_TAG action to set flow ID. */
4362                 *set_tag = (struct mlx5_rte_flow_action_set_tag){
4363                         .data = flow_id,
4364                 };
4365                 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4366                 if (ret < 0)
4367                         return ret;
4368                 set_tag->id = ret;
4369                 /* Construct new actions array. */
4370                 /* Replace QUEUE/RSS action. */
4371                 split_actions[qrss_idx] = (struct rte_flow_action){
4372                         .type = (enum rte_flow_action_type)
4373                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4374                         .conf = set_tag,
4375                 };
4376         }
4377         /* JUMP action to jump to mreg copy table (CP_TBL). */
4378         jump = (void *)(set_tag + 1);
4379         *jump = (struct rte_flow_action_jump){
4380                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4381         };
4382         split_actions[actions_n - 2] = (struct rte_flow_action){
4383                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
4384                 .conf = jump,
4385         };
4386         split_actions[actions_n - 1] = (struct rte_flow_action){
4387                 .type = RTE_FLOW_ACTION_TYPE_END,
4388         };
4389         return flow_id;
4390 }
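
/*
 * Illustrative sketch, not part of the driver: the contiguous buffer
 * layout flow_mreg_split_qrss_prep() expects from its caller. It
 * mirrors the act_size computation in flow_create_split_metadata()
 * below: actions_n + 1 action entries, then the SET_TAG payload,
 * then the JUMP payload.
 */
static __rte_unused size_t
example_qrss_split_buffer_size(int actions_n)
{
	return sizeof(struct rte_flow_action) * (actions_n + 1) +
	       sizeof(struct rte_flow_action_set_tag) +
	       sizeof(struct rte_flow_action_jump);
}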
4391
4392 /**
4393  * Extend the given action list for Tx metadata copy.
4394  *
4395  * Copy the given action list to the ext_actions and add flow metadata register
4396  * copy action in order to copy reg_a set by WQE to reg_c[0].
4397  *
4398  * @param[out] ext_actions
4399  *   Pointer to the extended action list.
4400  * @param[in] actions
4401  *   Pointer to the list of actions.
4402  * @param[in] actions_n
4403  *   Number of actions in the list.
4404  * @param[out] error
4405  *   Perform verbose error reporting if not NULL.
4406  * @param[in] encap_idx
4407  *   The encap action index.
4408  *
4409  * @return
4410  *   0 on success, negative value otherwise
4411  */
4412 static int
4413 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4414                        struct rte_flow_action *ext_actions,
4415                        const struct rte_flow_action *actions,
4416                        int actions_n, struct rte_flow_error *error,
4417                        int encap_idx)
4418 {
4419         struct mlx5_flow_action_copy_mreg *cp_mreg =
4420                 (struct mlx5_flow_action_copy_mreg *)
4421                         (ext_actions + actions_n + 1);
4422         int ret;
4423
4424         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4425         if (ret < 0)
4426                 return ret;
4427         cp_mreg->dst = ret;
4428         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4429         if (ret < 0)
4430                 return ret;
4431         cp_mreg->src = ret;
4432         if (encap_idx != 0)
4433                 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4434         if (encap_idx == actions_n - 1) {
4435                 ext_actions[actions_n - 1] = (struct rte_flow_action){
4436                         .type = (enum rte_flow_action_type)
4437                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4438                         .conf = cp_mreg,
4439                 };
4440                 ext_actions[actions_n] = (struct rte_flow_action){
4441                         .type = RTE_FLOW_ACTION_TYPE_END,
4442                 };
4443         } else {
4444                 ext_actions[encap_idx] = (struct rte_flow_action){
4445                         .type = (enum rte_flow_action_type)
4446                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4447                         .conf = cp_mreg,
4448                 };
4449                 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4450                                 sizeof(*ext_actions) * (actions_n - encap_idx));
4451         }
4452         return 0;
4453 }
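
/*
 * Illustrative note, not part of the driver: placement of the
 * COPY_MREG action by flow_mreg_tx_copy_prep() above. For an original
 * Tx list { RAW_ENCAP, COUNT, END } (actions_n = 3) with encap_idx = 0,
 * the result is { COPY_MREG, RAW_ENCAP, COUNT, END }, so reg_a is
 * copied to reg_c[0] before the packet headers are rewritten. With
 * encap_idx == actions_n - 1 (no encap action), COPY_MREG is simply
 * appended before END.
 */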
4454
4455 /**
4456  * Check the match action from the action list.
4457  *
4458  * @param[in] actions
4459  *   Pointer to the list of actions.
4460  * @param[in] attr
4461  *   Flow rule attributes.
4462  * @param[in] action
4463  *   The action to check for in the list.
4464  * @param[out] match_action_pos
4465  *   Pointer to the position of the matched action if it exists, otherwise -1.
4466  * @param[out] qrss_action_pos
4467  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4468  *
4469  * @return
4470  *   > 0 the total number of actions.
4471  *   0 if the match action is not found in the action list.
4472  */
4473 static int
4474 flow_check_match_action(const struct rte_flow_action actions[],
4475                         const struct rte_flow_attr *attr,
4476                         enum rte_flow_action_type action,
4477                         int *match_action_pos, int *qrss_action_pos)
4478 {
4479         const struct rte_flow_action_sample *sample;
4480         int actions_n = 0;
4481         int jump_flag = 0;
4482         uint32_t ratio = 0;
4483         int sub_type = 0;
4484         int flag = 0;
4485
4486         *match_action_pos = -1;
4487         *qrss_action_pos = -1;
4488         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4489                 if (actions->type == action) {
4490                         flag = 1;
4491                         *match_action_pos = actions_n;
4492                 }
4493                 if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
4494                     actions->type == RTE_FLOW_ACTION_TYPE_RSS)
4495                         *qrss_action_pos = actions_n;
4496                 if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
4497                         jump_flag = 1;
4498                 if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
4499                         sample = actions->conf;
4500                         ratio = sample->ratio;
4501                         sub_type = ((const struct rte_flow_action *)
4502                                         (sample->actions))->type;
4503                 }
4504                 actions_n++;
4505         }
4506         if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
4507                 if (ratio == 1) {
4508                         /* The JUMP action is not supported for mirroring;
4509                          * mirroring supports multiple destinations.
4510                          */
4511                         if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
4512                                 flag = 0;
4513                 }
4514         }
4515         /* Count RTE_FLOW_ACTION_TYPE_END. */
4516         return flag ? actions_n + 1 : 0;
4517 }
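
/*
 * Illustrative sketch, not part of the driver: the intended use of
 * flow_check_match_action() when looking for a SAMPLE action. The
 * helper name is hypothetical.
 */
static __rte_unused int
example_find_sample_action(const struct rte_flow_action actions[],
			   const struct rte_flow_attr *attr)
{
	int sample_pos = -1;
	int qrss_pos = -1;
	int n;

	n = flow_check_match_action(actions, attr,
				    RTE_FLOW_ACTION_TYPE_SAMPLE,
				    &sample_pos, &qrss_pos);
	/* n counts all actions including END; 0 means no valid match. */
	return n ? sample_pos : -1;
}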
4518
4519 #define SAMPLE_SUFFIX_ITEM 2
4520
4521 /**
4522  * Split the sample flow.
4523  *
4524  * As the sample flow is split into two sub flows, the prefix flow
4525  * keeps the sample action while the other actions move to the new
4526  * suffix flow.
4527  *
4528  * A unique tag ID is set in the prefix flow and matched in the suffix flow.
4529  *
4530  * @param dev
4531  *   Pointer to Ethernet device.
4532  * @param[in] fdb_tx
4533  *   FDB egress flow flag.
4534  * @param[out] sfx_items
4535  *   Suffix flow match items (list terminated by the END pattern item).
4536  * @param[in] actions
4537  *   Associated actions (list terminated by the END action).
4538  * @param[out] actions_sfx
4539  *   Suffix flow actions.
4540  * @param[out] actions_pre
4541  *   Prefix flow actions.
4542  * @param[in] actions_n
4543  *  The total number of actions.
4544  * @param[in] sample_action_pos
4545  *   The sample action position.
4546  * @param[in] qrss_action_pos
4547  *   The Queue/RSS action position.
4548  * @param[out] error
4549  *   Perform verbose error reporting if not NULL.
4550  *
4551  * @return
4552  *   0 or a unique flow ID on success, a negative errno value
4553  *   otherwise and rte_errno is set.
4554  */
4555 static int
4556 flow_sample_split_prep(struct rte_eth_dev *dev,
4557                        uint32_t fdb_tx,
4558                        struct rte_flow_item sfx_items[],
4559                        const struct rte_flow_action actions[],
4560                        struct rte_flow_action actions_sfx[],
4561                        struct rte_flow_action actions_pre[],
4562                        int actions_n,
4563                        int sample_action_pos,
4564                        int qrss_action_pos,
4565                        struct rte_flow_error *error)
4566 {
4567         struct mlx5_priv *priv = dev->data->dev_private;
4568         struct mlx5_rte_flow_action_set_tag *set_tag;
4569         struct mlx5_rte_flow_item_tag *tag_spec;
4570         struct mlx5_rte_flow_item_tag *tag_mask;
4571         uint32_t tag_id = 0;
4572         int index;
4573         int ret;
4574
4575         if (sample_action_pos < 0)
4576                 return rte_flow_error_set(error, EINVAL,
4577                                           RTE_FLOW_ERROR_TYPE_ACTION,
4578                                           NULL, "invalid position of sample "
4579                                           "action in list");
4580         if (!fdb_tx) {
4581                 /* Prepare the prefix tag action. */
4582                 set_tag = (void *)(actions_pre + actions_n + 1);
4583                 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4584                 if (ret < 0)
4585                         return ret;
4586                 set_tag->id = ret;
4587                 mlx5_ipool_malloc(priv->sh->ipool
4588                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4589                 set_tag->data = tag_id;
4590                 /* Prepare the suffix subflow items. */
4591                 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4592                 tag_spec->data = tag_id;
4593                 tag_spec->id = set_tag->id;
4594                 tag_mask = tag_spec + 1;
4595                 tag_mask->data = UINT32_MAX;
4596                 sfx_items[0] = (struct rte_flow_item){
4597                         .type = (enum rte_flow_item_type)
4598                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4599                         .spec = tag_spec,
4600                         .last = NULL,
4601                         .mask = tag_mask,
4602                 };
4603                 sfx_items[1] = (struct rte_flow_item){
4604                         .type = (enum rte_flow_item_type)
4605                                 RTE_FLOW_ITEM_TYPE_END,
4606                 };
4607         }
4608         /* Prepare the actions for prefix and suffix flow. */
4609         if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4610                 index = qrss_action_pos;
4611                 /* Put the actions preceding the Queue/RSS action into the prefix flow. */
4612                 if (index != 0)
4613                         memcpy(actions_pre, actions,
4614                                sizeof(struct rte_flow_action) * index);
4615                 /* Put others preceding the sample action into prefix flow. */
4616                 if (sample_action_pos > index + 1)
4617                         memcpy(actions_pre + index, actions + index + 1,
4618                                sizeof(struct rte_flow_action) *
4619                                (sample_action_pos - index - 1));
4620                 index = sample_action_pos - 1;
4621                 /* Put Queue/RSS action into Suffix flow. */
4622                 memcpy(actions_sfx, actions + qrss_action_pos,
4623                        sizeof(struct rte_flow_action));
4624                 actions_sfx++;
4625         } else {
4626                 index = sample_action_pos;
4627                 if (index != 0)
4628                         memcpy(actions_pre, actions,
4629                                sizeof(struct rte_flow_action) * index);
4630         }
4631         /* Add the extra tag action for NIC-RX and E-Switch ingress. */
4632         if (!fdb_tx) {
4633                 actions_pre[index++] =
4634                         (struct rte_flow_action){
4635                         .type = (enum rte_flow_action_type)
4636                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4637                         .conf = set_tag,
4638                 };
4639         }
4640         memcpy(actions_pre + index, actions + sample_action_pos,
4641                sizeof(struct rte_flow_action));
4642         index += 1;
4643         actions_pre[index] = (struct rte_flow_action){
4644                 .type = (enum rte_flow_action_type)
4645                         RTE_FLOW_ACTION_TYPE_END,
4646         };
4647         /* Put the actions after sample into Suffix flow. */
4648         memcpy(actions_sfx, actions + sample_action_pos + 1,
4649                sizeof(struct rte_flow_action) *
4650                (actions_n - sample_action_pos - 1));
4651         return tag_id;
4652 }
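
/*
 * Illustrative note, not part of the driver: the shape of the split
 * produced by flow_sample_split_prep() above for an original list
 * { COUNT, SAMPLE, QUEUE, END } on NIC-RX (fdb_tx == 0):
 *   prefix actions: { COUNT, SET_TAG(flow ID), SAMPLE, END }
 *   suffix items:   { TAG(flow ID), END }
 *   suffix actions: { QUEUE, END }
 * The suffix sub flow thus receives only packets tagged by the
 * prefix sub flow.
 */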
4653
4654 /**
4655  * The splitting for metadata feature.
4656  *
4657  * - Q/RSS action on NIC Rx should be split in order to pass through
4658  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
4659  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
4660  *
4661  * - All the actions on NIC Tx should have a mreg copy action to
4662  *   copy reg_a from WQE to reg_c[0].
4663  *
4664  * @param dev
4665  *   Pointer to Ethernet device.
4666  * @param[in] flow
4667  *   Parent flow structure pointer.
4668  * @param[in] attr
4669  *   Flow rule attributes.
4670  * @param[in] items
4671  *   Pattern specification (list terminated by the END pattern item).
4672  * @param[in] actions
4673  *   Associated actions (list terminated by the END action).
4674  * @param[in] flow_split_info
4675  *   Pointer to flow split info structure.
4676  * @param[out] error
4677  *   Perform verbose error reporting if not NULL.
4678  * @return
4679  *   0 on success, negative value otherwise
4680  */
4681 static int
4682 flow_create_split_metadata(struct rte_eth_dev *dev,
4683                            struct rte_flow *flow,
4684                            const struct rte_flow_attr *attr,
4685                            const struct rte_flow_item items[],
4686                            const struct rte_flow_action actions[],
4687                            struct mlx5_flow_split_info *flow_split_info,
4688                            struct rte_flow_error *error)
4689 {
4690         struct mlx5_priv *priv = dev->data->dev_private;
4691         struct mlx5_dev_config *config = &priv->config;
4692         const struct rte_flow_action *qrss = NULL;
4693         struct rte_flow_action *ext_actions = NULL;
4694         struct mlx5_flow *dev_flow = NULL;
4695         uint32_t qrss_id = 0;
4696         int mtr_sfx = 0;
4697         size_t act_size;
4698         int actions_n;
4699         int encap_idx;
4700         int ret;
4701
4702         /* Check whether extensive metadata feature is engaged. */
4703         if (!config->dv_flow_en ||
4704             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4705             !mlx5_flow_ext_mreg_supported(dev))
4706                 return flow_create_split_inner(dev, flow, NULL, attr, items,
4707                                                actions, flow_split_info, error);
4708         actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
4709                                                            &encap_idx);
4710         if (qrss) {
4711                 /* Exclude hairpin flows from splitting. */
4712                 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
4713                         const struct rte_flow_action_queue *queue;
4714
4715                         queue = qrss->conf;
4716                         if (mlx5_rxq_get_type(dev, queue->index) ==
4717                             MLX5_RXQ_TYPE_HAIRPIN)
4718                                 qrss = NULL;
4719                 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
4720                         const struct rte_flow_action_rss *rss;
4721
4722                         rss = qrss->conf;
4723                         if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
4724                             MLX5_RXQ_TYPE_HAIRPIN)
4725                                 qrss = NULL;
4726                 }
4727         }
4728         if (qrss) {
4729                 /* Check if it is in meter suffix table. */
4730                 mtr_sfx = attr->group == (attr->transfer ?
4731                           (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4732                           MLX5_FLOW_TABLE_LEVEL_SUFFIX);
4733                 /*
4734                  * Q/RSS action on NIC Rx should be split in order to pass through
4735                  * the mreg copy table (RX_CP_TBL) and then it jumps to the
4736                  * action table (RX_ACT_TBL) which has the split Q/RSS action.
4737                  */
4738                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4739                            sizeof(struct rte_flow_action_set_tag) +
4740                            sizeof(struct rte_flow_action_jump);
4741                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4742                                           SOCKET_ID_ANY);
4743                 if (!ext_actions)
4744                         return rte_flow_error_set(error, ENOMEM,
4745                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4746                                                   NULL, "no memory to split "
4747                                                   "metadata flow");
4748                 /*
4749                  * If we are the suffix flow of a meter, the tag already exists.
4750                  * Set the tag action to void.
4751                  */
4752                 if (mtr_sfx)
4753                         ext_actions[qrss - actions].type =
4754                                                 RTE_FLOW_ACTION_TYPE_VOID;
4755                 else
4756                         ext_actions[qrss - actions].type =
4757                                                 (enum rte_flow_action_type)
4758                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4759                 /*
4760                  * Create the new action list with the Q/RSS action removed
4761                  * and a set tag plus a jump to the register copy table
4762                  * (RX_CP_TBL) appended. The unique tag ID is preallocated
4763                  * here in advance, because it is needed for the set tag action.
4764                  */
4765                 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
4766                                                     qrss, actions_n, error);
4767                 if (!mtr_sfx && !qrss_id) {
4768                         ret = -rte_errno;
4769                         goto exit;
4770                 }
4771         } else if (attr->egress && !attr->transfer) {
4772                 /*
4773                  * All the actions on NIC Tx should have a metadata register
4774                  * copy action to copy reg_a from WQE to reg_c[meta]
4775                  */
4776                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4777                            sizeof(struct mlx5_flow_action_copy_mreg);
4778                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4779                                           SOCKET_ID_ANY);
4780                 if (!ext_actions)
4781                         return rte_flow_error_set(error, ENOMEM,
4782                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4783                                                   NULL, "no memory to split "
4784                                                   "metadata flow");
4785                 /* Create the action list appended with copy register. */
4786                 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
4787                                              actions_n, error, encap_idx);
4788                 if (ret < 0)
4789                         goto exit;
4790         }
4791         /* Add the unmodified original or prefix subflow. */
4792         ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
4793                                       items, ext_actions ? ext_actions :
4794                                       actions, flow_split_info, error);
4795         if (ret < 0)
4796                 goto exit;
4797         MLX5_ASSERT(dev_flow);
4798         if (qrss) {
4799                 const struct rte_flow_attr q_attr = {
4800                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
4801                         .ingress = 1,
4802                 };
4803                 /* Internal PMD action to set register. */
4804                 struct mlx5_rte_flow_item_tag q_tag_spec = {
4805                         .data = qrss_id,
4806                         .id = REG_NON,
4807                 };
4808                 struct rte_flow_item q_items[] = {
4809                         {
4810                                 .type = (enum rte_flow_item_type)
4811                                         MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4812                                 .spec = &q_tag_spec,
4813                                 .last = NULL,
4814                                 .mask = NULL,
4815                         },
4816                         {
4817                                 .type = RTE_FLOW_ITEM_TYPE_END,
4818                         },
4819                 };
4820                 struct rte_flow_action q_actions[] = {
4821                         {
4822                                 .type = qrss->type,
4823                                 .conf = qrss->conf,
4824                         },
4825                         {
4826                                 .type = RTE_FLOW_ACTION_TYPE_END,
4827                         },
4828                 };
4829                 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
4830
4831                 /*
4832                  * Configure the tag item only if there is no meter subflow.
4833                  * Since tag is already marked in the meter suffix subflow
4834                  * we can just use the meter suffix items as is.
4835                  */
4836                 if (qrss_id) {
4837                         /* Not meter subflow. */
4838                         MLX5_ASSERT(!mtr_sfx);
4839                         /*
4840                          * Put the unique ID in the prefix flow because it
4841                          * is destroyed after the suffix flow; the ID is
4842                          * freed once no actual flow uses it, and only then
4843                          * does identifier reallocation become possible (for
4844                          * example, for other flows in other threads).
4845                          */
4846                         dev_flow->handle->split_flow_id = qrss_id;
4847                         ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
4848                                                    error);
4849                         if (ret < 0)
4850                                 goto exit;
4851                         q_tag_spec.id = ret;
4852                 }
4853                 dev_flow = NULL;
4854                 /* Add suffix subflow to execute Q/RSS. */
4855                 flow_split_info->prefix_layers = layers;
4856                 flow_split_info->prefix_mark = 0;
4857                 ret = flow_create_split_inner(dev, flow, &dev_flow,
4858                                               &q_attr, mtr_sfx ? items :
4859                                               q_items, q_actions,
4860                                               flow_split_info, error);
4861                 if (ret < 0)
4862                         goto exit;
4863                 /* The qrss ID is now owned by the flow, do not free it on exit. */
4864                 qrss_id = 0;
4865                 MLX5_ASSERT(dev_flow);
4866         }
4867
4868 exit:
4869         /*
4870          * They are included in the parent flow list and will be destroyed
4871          * These ones are included into parent flow list and will be destroyed
4872          * by flow_drv_destroy.
4873          */
4874         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4875                         qrss_id);
4876         mlx5_free(ext_actions);
4877         return ret;
4878 }
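
/*
 * Illustrative note, not part of the driver: with a Q/RSS action and
 * extended metadata enabled, flow_create_split_metadata() ends up with
 * two sub flows:
 *   - prefix: original items; actions with Q/RSS replaced by
 *     SET_TAG(flow ID) plus a JUMP to RX_CP_TBL;
 *   - suffix: a TAG(flow ID) match in RX_ACT_TBL (group
 *     MLX5_FLOW_MREG_ACT_TABLE_GROUP) performing the original Q/RSS.
 */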
4879
4880 /**
4881  * The splitting for meter feature.
4882  *
4883  * - The meter flow is split into two flows, a prefix and a
4884  *   suffix flow. The packets are meaningful only if they pass
4885  *   the prefix meter action.
4886  *
4887  * - Reg_C_5 is used to match the packet between the prefix and
4888  *   suffix flows.
4889  *
4890  * @param dev
4891  *   Pointer to Ethernet device.
4892  * @param[in] flow
4893  *   Parent flow structure pointer.
4894  * @param[in] attr
4895  *   Flow rule attributes.
4896  * @param[in] items
4897  *   Pattern specification (list terminated by the END pattern item).
4898  * @param[in] actions
4899  *   Associated actions (list terminated by the END action).
4900  * @param[in] flow_split_info
4901  *   Pointer to flow split info structure.
4902  * @param[out] error
4903  *   Perform verbose error reporting if not NULL.
4904  * @return
4905  *   0 on success, negative value otherwise
4906  */
4907 static int
4908 flow_create_split_meter(struct rte_eth_dev *dev,
4909                         struct rte_flow *flow,
4910                         const struct rte_flow_attr *attr,
4911                         const struct rte_flow_item items[],
4912                         const struct rte_flow_action actions[],
4913                         struct mlx5_flow_split_info *flow_split_info,
4914                         struct rte_flow_error *error)
4915 {
4916         struct mlx5_priv *priv = dev->data->dev_private;
4917         struct rte_flow_action *sfx_actions = NULL;
4918         struct rte_flow_action *pre_actions = NULL;
4919         struct rte_flow_item *sfx_items = NULL;
4920         struct mlx5_flow *dev_flow = NULL;
4921         struct rte_flow_attr sfx_attr = *attr;
4922         uint32_t mtr = 0;
4923         uint32_t mtr_tag_id = 0;
4924         size_t act_size;
4925         size_t item_size;
4926         int actions_n = 0;
4927         int ret;
4928
4929         if (priv->mtr_en)
4930                 actions_n = flow_check_meter_action(actions, &mtr);
4931         if (mtr) {
4932                 /* The five prefix actions: meter, decap, encap, tag, end. */
4933                 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4934                            sizeof(struct mlx5_rte_flow_action_set_tag);
4935                 /* tag, vlan, port id, end. */
4936 #define METER_SUFFIX_ITEM 4
4937                 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4938                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
4939                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
4940                                           0, SOCKET_ID_ANY);
4941                 if (!sfx_actions)
4942                         return rte_flow_error_set(error, ENOMEM,
4943                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4944                                                   NULL, "no memory to split "
4945                                                   "meter flow");
4946                 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4947                              act_size);
4948                 pre_actions = sfx_actions + actions_n;
4949                 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
4950                                                    actions, sfx_actions,
4951                                                    pre_actions);
4952                 if (!mtr_tag_id) {
4953                         ret = -rte_errno;
4954                         goto exit;
4955                 }
4956                 /* Add the prefix subflow. */
4957                 flow_split_info->prefix_mark = 0;
4958                 ret = flow_create_split_inner(dev, flow, &dev_flow,
4959                                               attr, items, pre_actions,
4960                                               flow_split_info, error);
4961                 if (ret) {
4962                         ret = -rte_errno;
4963                         goto exit;
4964                 }
4965                 dev_flow->handle->split_flow_id = mtr_tag_id;
4966                 /* Set the suffix group attribute. */
4967                 sfx_attr.group = sfx_attr.transfer ?
4968                                 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4969                                  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4970                 flow_split_info->prefix_layers =
4971                                 flow_get_prefix_layer_flags(dev_flow);
4972                 flow_split_info->prefix_mark = dev_flow->handle->mark;
4973         }
4974         /* Add the suffix subflow, or the original flow when there is no meter. */
4975         ret = flow_create_split_metadata(dev, flow,
4976                                          &sfx_attr, sfx_items ?
4977                                          sfx_items : items,
4978                                          sfx_actions ? sfx_actions : actions,
4979                                          flow_split_info, error);
4980 exit:
4981         if (sfx_actions)
4982                 mlx5_free(sfx_actions);
4983         return ret;
4984 }
4985
4986 /**
4987  * The splitting for sample feature.
4988  *
4989  * Once a Sample action is detected in the action list, the flow actions are
4990  * split into a prefix sub flow and a suffix sub flow.
4991  *
4992  * The original items remain in the prefix sub flow. All actions preceding
4993  * the sample action, and the sample action itself, are copied to the prefix
4994  * sub flow; the actions following the sample action are copied to the
4995  * suffix sub flow. A Queue action is always located in the suffix sub flow.
4996  *
4997  * In order to match packets from the prefix sub flow in the suffix sub
4998  * flow, an extra tag action is added to the prefix sub flow, and the
4999  * suffix sub flow uses a tag item with the unique flow id.
5000  *
5001  * @param dev
5002  *   Pointer to Ethernet device.
5003  * @param[in] flow
5004  *   Parent flow structure pointer.
5005  * @param[in] attr
5006  *   Flow rule attributes.
5007  * @param[in] items
5008  *   Pattern specification (list terminated by the END pattern item).
5009  * @param[in] actions
5010  *   Associated actions (list terminated by the END action).
5011  * @param[in] flow_split_info
5012  *   Pointer to flow split info structure.
5013  * @param[out] error
5014  *   Perform verbose error reporting if not NULL.
5015  * @return
5016  *   0 on success, negative value otherwise
5017  */
5018 static int
5019 flow_create_split_sample(struct rte_eth_dev *dev,
5020                          struct rte_flow *flow,
5021                          const struct rte_flow_attr *attr,
5022                          const struct rte_flow_item items[],
5023                          const struct rte_flow_action actions[],
5024                          struct mlx5_flow_split_info *flow_split_info,
5025                          struct rte_flow_error *error)
5026 {
5027         struct mlx5_priv *priv = dev->data->dev_private;
5028         struct rte_flow_action *sfx_actions = NULL;
5029         struct rte_flow_action *pre_actions = NULL;
5030         struct rte_flow_item *sfx_items = NULL;
5031         struct mlx5_flow *dev_flow = NULL;
5032         struct rte_flow_attr sfx_attr = *attr;
5033 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5034         struct mlx5_flow_dv_sample_resource *sample_res;
5035         struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5036         struct mlx5_flow_tbl_resource *sfx_tbl;
5037 #endif
5038         size_t act_size;
5039         size_t item_size;
5040         uint32_t fdb_tx = 0;
5041         int32_t tag_id = 0;
5042         int actions_n = 0;
5043         int sample_action_pos;
5044         int qrss_action_pos;
5045         int ret = 0;
5046
5047         if (priv->sampler_en)
5048                 actions_n = flow_check_match_action(actions, attr,
5049                                         RTE_FLOW_ACTION_TYPE_SAMPLE,
5050                                         &sample_action_pos, &qrss_action_pos);
5051         if (actions_n) {
5052                 /* The prefix actions must include sample, tag, end. */
5053                 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5054                            + sizeof(struct mlx5_rte_flow_action_set_tag);
5055                 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5056                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5057                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5058                                           item_size), 0, SOCKET_ID_ANY);
5059                 if (!sfx_actions)
5060                         return rte_flow_error_set(error, ENOMEM,
5061                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5062                                                   NULL, "no memory to split "
5063                                                   "sample flow");
5064                 /* The representor_id is -1 for uplink. */
5065                 fdb_tx = (attr->transfer && priv->representor_id != -1);
5066                 if (!fdb_tx)
5067                         sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5068                                         + act_size);
5069                 pre_actions = sfx_actions + actions_n;
5070                 tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
5071                                                 actions, sfx_actions,
5072                                                 pre_actions, actions_n,
5073                                                 sample_action_pos,
5074                                                 qrss_action_pos, error);
5075                 if (tag_id < 0 || (!fdb_tx && !tag_id)) {
5076                         ret = -rte_errno;
5077                         goto exit;
5078                 }
5079                 /* Add the prefix subflow. */
5080                 ret = flow_create_split_inner(dev, flow, &dev_flow, attr,
5081                                               items, pre_actions,
5082                                               flow_split_info, error);
5083                 if (ret) {
5084                         ret = -rte_errno;
5085                         goto exit;
5086                 }
5087                 dev_flow->handle->split_flow_id = tag_id;
5088 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5089                 /* Set the sfx group attr. */
5090                 sample_res = (struct mlx5_flow_dv_sample_resource *)
5091                                         dev_flow->dv.sample_res;
5092                 sfx_tbl = (struct mlx5_flow_tbl_resource *)
5093                                         sample_res->normal_path_tbl;
5094                 sfx_tbl_data = container_of(sfx_tbl,
5095                                         struct mlx5_flow_tbl_data_entry, tbl);
5096                 sfx_attr.group = sfx_attr.transfer ?
5097                                         (sfx_tbl_data->table_id - 1) :
5098                                          sfx_tbl_data->table_id;
5099                 flow_split_info->prefix_layers =
5100                                 flow_get_prefix_layer_flags(dev_flow);
5101                 flow_split_info->prefix_mark = dev_flow->handle->mark;
5102                 /* The suffix group level is already scaled with the factor;
5103                  * set skip_scale to 1 to avoid scaling again in translation.
5104                  */
5105                 flow_split_info->skip_scale = 1;
5106 #endif
5107         }
5108         /* Add the suffix subflow. */
5109         ret = flow_create_split_meter(dev, flow, &sfx_attr,
5110                                       sfx_items ? sfx_items : items,
5111                                       sfx_actions ? sfx_actions : actions,
5112                                       flow_split_info, error);
5113 exit:
5114         if (sfx_actions)
5115                 mlx5_free(sfx_actions);
5116         return ret;
5117 }
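
/*
 * Editorial sketch of the split described above; the register and flow id
 * below are illustrative only, as is the tag placement. An application
 * rule such as
 *
 *   actions: COUNT / SAMPLE / QUEUE / END
 *
 * is rebuilt as two sub flows:
 *
 *   prefix: items:   <original items>
 *           actions: COUNT / SET_TAG(reg_c, id) / SAMPLE / END
 *   suffix: items:   TAG(reg_c, id) / END
 *           actions: QUEUE / END
 *
 * The suffix sub flow lands in the table taken from the sample resource's
 * normal path table, so packets leaving the prefix flow match it with the
 * tag still set.
 */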
5118
5119 /**
5120  * Split the flow into a set of subflows. The splitters may be linked
5121  * in a chain, like this:
5122  * flow_create_split_outer() calls:
5123  *   flow_create_split_meter() calls:
5124  *     flow_create_split_metadata(meter_subflow_0) calls:
5125  *       flow_create_split_inner(metadata_subflow_0)
5126  *       flow_create_split_inner(metadata_subflow_1)
5127  *       flow_create_split_inner(metadata_subflow_2)
5128  *     flow_create_split_metadata(meter_subflow_1) calls:
5129  *       flow_create_split_inner(metadata_subflow_0)
5130  *       flow_create_split_inner(metadata_subflow_1)
5131  *       flow_create_split_inner(metadata_subflow_2)
5132  *
5133  * This provides a flexible way to add new levels of flow splitting.
5134  * All successfully created subflows are included in the parent
5135  * flow's dev_flow list.
5136  *
5137  * @param dev
5138  *   Pointer to Ethernet device.
5139  * @param[in] flow
5140  *   Parent flow structure pointer.
5141  * @param[in] attr
5142  *   Flow rule attributes.
5143  * @param[in] items
5144  *   Pattern specification (list terminated by the END pattern item).
5145  * @param[in] actions
5146  *   Associated actions (list terminated by the END action).
5147  * @param[in] flow_split_info
5148  *   Pointer to flow split info structure.
5149  * @param[out] error
5150  *   Perform verbose error reporting if not NULL.
5151  * @return
5152  *   0 on success, negative value otherwise
5153  */
5154 static int
5155 flow_create_split_outer(struct rte_eth_dev *dev,
5156                         struct rte_flow *flow,
5157                         const struct rte_flow_attr *attr,
5158                         const struct rte_flow_item items[],
5159                         const struct rte_flow_action actions[],
5160                         struct mlx5_flow_split_info *flow_split_info,
5161                         struct rte_flow_error *error)
5162 {
5163         int ret;
5164
5165         ret = flow_create_split_sample(dev, flow, attr, items,
5166                                        actions, flow_split_info, error);
5167         MLX5_ASSERT(ret <= 0);
5168         return ret;
5169 }
5170
5171 static struct mlx5_flow_tunnel *
5172 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5173                       const struct rte_flow_attr *attr,
5174                       const struct rte_flow_item items[],
5175                       const struct rte_flow_action actions[])
5176 {
5177         struct mlx5_flow_tunnel *tunnel;
5178
5179 #pragma GCC diagnostic push
5180 #pragma GCC diagnostic ignored "-Wcast-qual"
5181         if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5182                 tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5183         else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5184                 tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5185         else
5186                 tunnel = NULL;
5187 #pragma GCC diagnostic pop
5188
5189         return tunnel;
5190 }
5191
5192 /**
5193  * Adjust flow RSS workspace if needed.
5194  *
5195  * @param wks
5196  *   Pointer to thread flow work space.
5197  * @param rss_desc
5198  *   Pointer to RSS descriptor.
5199  * @param[in] nrssq_num
5200  *   New RSS queue number.
5201  *
5202  * @return
5203  *   0 on success, -1 otherwise and rte_errno is set.
5204  */
5205 static int
5206 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5207                           struct mlx5_flow_rss_desc *rss_desc,
5208                           uint32_t nrssq_num)
5209 {
5210         uint16_t *queue;
5211
5212         if (likely(nrssq_num <= wks->rssq_num))
5213                 return 0;
5214         /* Use a temporary so the old array is kept if realloc fails. */
5215         queue = realloc(rss_desc->queue,
5216                         sizeof(*rss_desc->queue) * RTE_ALIGN(nrssq_num, 2));
5217         if (!queue) {
5218                 rte_errno = ENOMEM;
5219                 return -1;
5220         }
5221         rss_desc->queue = queue;
5222         wks->rssq_num = RTE_ALIGN(nrssq_num, 2);
5223         return 0;
5224 }
5221
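/*
 * Editorial worked example for flow_rss_workspace_adjust(): with
 * wks->rssq_num == 4 and nrssq_num == 5, the queue array is reallocated
 * to RTE_ALIGN(5, 2) == 6 entries and wks->rssq_num becomes 6, so a later
 * request for up to 6 queues returns without reallocating.
 */
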
5222 /**
5223  * Create a flow and add it to @p list.
5224  *
5225  * @param dev
5226  *   Pointer to Ethernet device.
5227  * @param list
5228  *   Pointer to a TAILQ flow list. If this parameter is NULL,
5229  *   no list insertion occurs: the flow is just created and
5230  *   it is the caller's responsibility to track the
5231  *   created flow.
5232  * @param[in] attr
5233  *   Flow rule attributes.
5234  * @param[in] items
5235  *   Pattern specification (list terminated by the END pattern item).
5236  * @param[in] actions
5237  *   Associated actions (list terminated by the END action).
5238  * @param[in] external
5239  *   This flow rule is created by a request external to the PMD.
5240  * @param[out] error
5241  *   Perform verbose error reporting if not NULL.
5242  *
5243  * @return
5244  *   A flow index on success, 0 otherwise and rte_errno is set.
5245  */
5246 static uint32_t
5247 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5248                  const struct rte_flow_attr *attr,
5249                  const struct rte_flow_item items[],
5250                  const struct rte_flow_action original_actions[],
5251                  bool external, struct rte_flow_error *error)
5252 {
5253         struct mlx5_priv *priv = dev->data->dev_private;
5254         struct rte_flow *flow = NULL;
5255         struct mlx5_flow *dev_flow;
5256         const struct rte_flow_action_rss *rss;
5257         struct mlx5_translated_shared_action
5258                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5259         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5260         union {
5261                 struct mlx5_flow_expand_rss buf;
5262                 uint8_t buffer[2048];
5263         } expand_buffer;
5264         union {
5265                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5266                 uint8_t buffer[2048];
5267         } actions_rx;
5268         union {
5269                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5270                 uint8_t buffer[2048];
5271         } actions_hairpin_tx;
5272         union {
5273                 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5274                 uint8_t buffer[2048];
5275         } items_tx;
5276         struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5277         struct mlx5_flow_rss_desc *rss_desc;
5278         const struct rte_flow_action *p_actions_rx;
5279         uint32_t i;
5280         uint32_t idx = 0;
5281         int hairpin_flow;
5282         struct rte_flow_attr attr_tx = { .priority = 0 };
5283         const struct rte_flow_action *actions;
5284         struct rte_flow_action *translated_actions = NULL;
5285         struct mlx5_flow_tunnel *tunnel;
5286         struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5287         struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
5288         struct mlx5_flow_split_info flow_split_info = {
5289                 .external = !!external,
5290                 .skip_scale = 0,
5291                 .flow_idx = 0,
5292                 .prefix_mark = 0,
5293                 .prefix_layers = 0
5294         };
5295         int ret;
5296
5297         MLX5_ASSERT(wks);
5298         rss_desc = &wks->rss_desc;
5299         ret = flow_shared_actions_translate(dev, original_actions,
5300                                             shared_actions,
5301                                             &shared_actions_n,
5302                                             &translated_actions, error);
5303         if (ret < 0) {
5304                 MLX5_ASSERT(translated_actions == NULL);
5305                 return 0;
5306         }
5307         actions = translated_actions ? translated_actions : original_actions;
5308         p_actions_rx = actions;
5309         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5310         ret = flow_drv_validate(dev, attr, items, p_actions_rx,
5311                                 external, hairpin_flow, error);
5312         if (ret < 0)
5313                 goto error_before_hairpin_split;
5314         flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5315         if (!flow) {
5316                 rte_errno = ENOMEM;
5317                 goto error_before_hairpin_split;
5318         }
5319         if (hairpin_flow > 0) {
5320                 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5321                         rte_errno = EINVAL;
5322                         goto error_before_hairpin_split;
5323                 }
5324                 flow_hairpin_split(dev, actions, actions_rx.actions,
5325                                    actions_hairpin_tx.actions, items_tx.items,
5326                                    idx);
5327                 p_actions_rx = actions_rx.actions;
5328         }
5329         flow_split_info.flow_idx = idx;
5330         flow->drv_type = flow_get_drv_type(dev, attr);
5331         MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5332                     flow->drv_type < MLX5_FLOW_TYPE_MAX);
5333         memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5334         rss = flow_get_rss_action(p_actions_rx);
5335         if (rss) {
5336                 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5337                         goto error;
5338                 /*
5339                  * The following information is required by
5340                  * mlx5_flow_hashfields_adjust() in advance.
5341                  */
5342                 rss_desc->level = rss->level;
5343                 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5344                 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5345         }
5346         flow->dev_handles = 0;
5347         if (rss && rss->types) {
5348                 unsigned int graph_root;
5349
5350                 graph_root = find_graph_root(items, rss->level);
5351                 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5352                                            items, rss->types,
5353                                            mlx5_support_expansion, graph_root);
5354                 MLX5_ASSERT(ret > 0 &&
5355                        (unsigned int)ret < sizeof(expand_buffer.buffer));
5356         } else {
5357                 buf->entries = 1;
5358                 buf->entry[0].pattern = (void *)(uintptr_t)items;
5359         }
5360         rss_desc->shared_rss = flow_get_shared_rss_action(dev, shared_actions,
5361                                                       shared_actions_n);
5362         for (i = 0; i < buf->entries; ++i) {
5363                 /* Initialize flow split data. */
5364                 flow_split_info.prefix_layers = 0;
5365                 flow_split_info.prefix_mark = 0;
5366                 flow_split_info.skip_scale = 0;
5367                 /*
5368                  * The splitter may create multiple dev_flows,
5369                  * depending on configuration. In the simplest
5370                  * case it just creates the unmodified original flow.
5371                  */
5372                 ret = flow_create_split_outer(dev, flow, attr,
5373                                               buf->entry[i].pattern,
5374                                               p_actions_rx, &flow_split_info,
5375                                               error);
5376                 if (ret < 0)
5377                         goto error;
5378                 if (is_flow_tunnel_steer_rule(dev, attr,
5379                                               buf->entry[i].pattern,
5380                                               p_actions_rx)) {
5381                         ret = flow_tunnel_add_default_miss(dev, flow, attr,
5382                                                            p_actions_rx,
5383                                                            idx,
5384                                                            &default_miss_ctx,
5385                                                            error);
5386                         if (ret < 0) {
5387                                 mlx5_free(default_miss_ctx.queue);
5388                                 goto error;
5389                         }
5390                 }
5391         }
5392         /* Create the tx flow. */
5393         if (hairpin_flow) {
5394                 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5395                 attr_tx.ingress = 0;
5396                 attr_tx.egress = 1;
5397                 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5398                                          actions_hairpin_tx.actions,
5399                                          idx, error);
5400                 if (!dev_flow)
5401                         goto error;
5402                 dev_flow->flow = flow;
5403                 dev_flow->external = 0;
5404                 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5405                               dev_flow->handle, next);
5406                 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5407                                          items_tx.items,
5408                                          actions_hairpin_tx.actions, error);
5409                 if (ret < 0)
5410                         goto error;
5411         }
5412         /*
5413          * Update the metadata register copy table. If extensive
5414          * metadata feature is enabled and registers are supported
5415          * we might create the extra rte_flow for each unique
5416          * MARK/FLAG action ID.
5417          *
5418          * The table is updated for ingress flows only, because
5419          * egress flows belong to a different device and the
5420          * copy table should be updated in the peer NIC Rx domain.
5421          */
5422         if (attr->ingress &&
5423             (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5424                 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5425                 if (ret)
5426                         goto error;
5427         }
5428         /*
5429          * If the flow is external (from the application), OR the device is
5430          * started, OR it is the mreg copy flow, then apply it immediately.
5431          */
5432         if (external || dev->data->dev_started ||
5433             (attr->group == MLX5_FLOW_MREG_CP_TABLE_GROUP &&
5434              attr->priority == MLX5_FLOW_PRIO_RSVD)) {
5435                 ret = flow_drv_apply(dev, flow, error);
5436                 if (ret < 0)
5437                         goto error;
5438         }
5439         if (list) {
5440                 rte_spinlock_lock(&priv->flow_list_lock);
5441                 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5442                              flow, next);
5443                 rte_spinlock_unlock(&priv->flow_list_lock);
5444         }
5445         flow_rxq_flags_set(dev, flow);
5446         rte_free(translated_actions);
5447         tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5448         if (tunnel) {
5449                 flow->tunnel = 1;
5450                 flow->tunnel_id = tunnel->tunnel_id;
5451                 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5452                 mlx5_free(default_miss_ctx.queue);
5453         }
5454         mlx5_flow_pop_thread_workspace();
5455         return idx;
5456 error:
5457         MLX5_ASSERT(flow);
5458         ret = rte_errno; /* Save rte_errno before cleanup. */
5459         flow_mreg_del_copy_action(dev, flow);
5460         flow_drv_destroy(dev, flow);
5461         if (rss_desc->shared_rss)
5462                 __atomic_sub_fetch(&((struct mlx5_shared_action_rss *)
5463                         mlx5_ipool_get
5464                         (priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
5465                         rss_desc->shared_rss))->refcnt, 1, __ATOMIC_RELAXED);
5466         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5467         rte_errno = ret; /* Restore rte_errno. */
5470         mlx5_flow_pop_thread_workspace();
5471 error_before_hairpin_split:
5472         rte_free(translated_actions);
5473         return 0;
5474 }
5475
5476 /**
5477  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5478  * incoming packets to table 1.
5479  *
5480  * Other flow rules, requested for group n, will be created in
5481  * e-switch table n+1.
5482  * A jump action to e-switch group n will jump to e-switch table n+1.
5483  *
5484  * Used when working in switchdev mode, to utilise advantages of table 1
5485  * and above.
5486  *
5487  * @param dev
5488  *   Pointer to Ethernet device.
5489  *
5490  * @return
5491  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
5492  */
5493 struct rte_flow *
5494 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5495 {
5496         const struct rte_flow_attr attr = {
5497                 .group = 0,
5498                 .priority = 0,
5499                 .ingress = 1,
5500                 .egress = 0,
5501                 .transfer = 1,
5502         };
5503         const struct rte_flow_item pattern = {
5504                 .type = RTE_FLOW_ITEM_TYPE_END,
5505         };
5506         struct rte_flow_action_jump jump = {
5507                 .group = 1,
5508         };
5509         const struct rte_flow_action actions[] = {
5510                 {
5511                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
5512                         .conf = &jump,
5513                 },
5514                 {
5515                         .type = RTE_FLOW_ACTION_TYPE_END,
5516                 },
5517         };
5518         struct mlx5_priv *priv = dev->data->dev_private;
5519         struct rte_flow_error error;
5520
5521         return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5522                                                    &attr, &pattern,
5523                                                    actions, false, &error);
5524 }
5525
5526 /**
5527  * Validate a flow supported by the NIC.
5528  *
5529  * @see rte_flow_validate()
5530  * @see rte_flow_ops
5531  */
5532 int
5533 mlx5_flow_validate(struct rte_eth_dev *dev,
5534                    const struct rte_flow_attr *attr,
5535                    const struct rte_flow_item items[],
5536                    const struct rte_flow_action original_actions[],
5537                    struct rte_flow_error *error)
5538 {
5539         int hairpin_flow;
5540         struct mlx5_translated_shared_action
5541                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5542         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5543         const struct rte_flow_action *actions;
5544         struct rte_flow_action *translated_actions = NULL;
5545         int ret = flow_shared_actions_translate(dev, original_actions,
5546                                                 shared_actions,
5547                                                 &shared_actions_n,
5548                                                 &translated_actions, error);
5549
5550         if (ret)
5551                 return ret;
5552         actions = translated_actions ? translated_actions : original_actions;
5553         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5554         ret = flow_drv_validate(dev, attr, items, actions,
5555                                 true, hairpin_flow, error);
5556         rte_free(translated_actions);
5557         return ret;
5558 }
5559
5560 /**
5561  * Create a flow.
5562  *
5563  * @see rte_flow_create()
5564  * @see rte_flow_ops
5565  */
5566 struct rte_flow *
5567 mlx5_flow_create(struct rte_eth_dev *dev,
5568                  const struct rte_flow_attr *attr,
5569                  const struct rte_flow_item items[],
5570                  const struct rte_flow_action actions[],
5571                  struct rte_flow_error *error)
5572 {
5573         struct mlx5_priv *priv = dev->data->dev_private;
5574
5575         /*
5576          * If the device is not started yet, it is not allowed to create a
5577          * flow from the application. PMD default flows and traffic control flows
5578          * are not affected.
5579          */
5580         if (unlikely(!dev->data->dev_started)) {
5581                 DRV_LOG(DEBUG, "port %u is not started when "
5582                         "inserting a flow", dev->data->port_id);
5583                 rte_flow_error_set(error, ENODEV,
5584                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5585                                    NULL,
5586                                    "port not started");
5587                 return NULL;
5588         }
5589
5590         return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
5591                                   attr, items, actions, true, error);
5592 }
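
/*
 * Editorial usage sketch (port id and queue index are hypothetical):
 * mlx5_flow_create() above is reached through the generic rte_flow API
 * once the port has been started, e.g.:
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(0, &attr, pattern, actions, &err);
 */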
5593
5594 /**
5595  * Destroy a flow in a list.
5596  *
5597  * @param dev
5598  *   Pointer to Ethernet device.
5599  * @param list
5600  *   Pointer to the indexed flow list. If this parameter is NULL,
5601  *   there is no flow removal from the list. Note that since the
5602  *   flow is added to an indexed list, the memory the indexed
5603  *   list points to may change as flows are destroyed.
5604  * @param[in] flow_idx
5605  *   Index of flow to destroy.
5606  */
5607 static void
5608 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
5609                   uint32_t flow_idx)
5610 {
5611         struct mlx5_priv *priv = dev->data->dev_private;
5612         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
5613                                                [MLX5_IPOOL_RTE_FLOW], flow_idx);
5614
5615         if (!flow)
5616                 return;
5617         /*
5618          * Update RX queue flags only if port is started, otherwise it is
5619          * already clean.
5620          */
5621         if (dev->data->dev_started)
5622                 flow_rxq_flags_trim(dev, flow);
5623         flow_drv_destroy(dev, flow);
5624         if (list) {
5625                 rte_spinlock_lock(&priv->flow_list_lock);
5626                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
5627                              flow_idx, flow, next);
5628                 rte_spinlock_unlock(&priv->flow_list_lock);
5629         }
5630         if (flow->tunnel) {
5631                 struct mlx5_flow_tunnel *tunnel;
5632
5633                 tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
5634                 RTE_VERIFY(tunnel);
5635                 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
5636                         mlx5_flow_tunnel_free(dev, tunnel);
5637         }
5638         flow_mreg_del_copy_action(dev, flow);
5639         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
5640 }
5641
5642 /**
5643  * Destroy all flows.
5644  *
5645  * @param dev
5646  *   Pointer to Ethernet device.
5647  * @param list
5648  *   Pointer to the Indexed flow list.
5649  * @param active
5650  *   If flushing is called actively.
5651  */
5652 void
5653 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
5654 {
5655         uint32_t num_flushed = 0;
5656
5657         while (*list) {
5658                 flow_list_destroy(dev, list, *list);
5659                 num_flushed++;
5660         }
5661         if (active) {
5662                 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
5663                         dev->data->port_id, num_flushed);
5664         }
5665 }
5666
5667 /**
5668  * Stop all default actions for flows.
5669  *
5670  * @param dev
5671  *   Pointer to Ethernet device.
5672  */
5673 void
5674 mlx5_flow_stop_default(struct rte_eth_dev *dev)
5675 {
5676         flow_mreg_del_default_copy_action(dev);
5677         flow_rxq_flags_clear(dev);
5678 }
5679
5680 /**
5681  * Start all default actions for flows.
5682  *
5683  * @param dev
5684  *   Pointer to Ethernet device.
5685  * @return
5686  *   0 on success, a negative errno value otherwise and rte_errno is set.
5687  */
5688 int
5689 mlx5_flow_start_default(struct rte_eth_dev *dev)
5690 {
5691         struct rte_flow_error error;
5692
5693         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
5694         return flow_mreg_add_default_copy_action(dev, &error);
5695 }
5696
5697 /**
5698  * Release the thread-specific flow workspace data (pthread key destructor).
5699  */
5700 static void
5701 flow_release_workspace(void *data)
5702 {
5703         struct mlx5_flow_workspace *wks = data;
5704         struct mlx5_flow_workspace *next;
5705
5706         while (wks) {
5707                 next = wks->next;
5708                 free(wks->rss_desc.queue);
5709                 free(wks);
5710                 wks = next;
5711         }
5712 }
5713
5714 /**
5715  * Initialize key of thread specific flow workspace data.
5716  */
5717 static void
5718 flow_alloc_workspace(void)
5719 {
5720         if (pthread_key_create(&key_workspace, flow_release_workspace))
5721                 DRV_LOG(ERR, "Can't create flow workspace data thread key.");
5722 }
5723
5724 /**
5725  * Get thread specific current flow workspace.
5726  *
5727  * @return pointer to thread specific flow workspace data, NULL on error.
5728  */
5729 struct mlx5_flow_workspace*
5730 mlx5_flow_get_thread_workspace(void)
5731 {
5732         struct mlx5_flow_workspace *data;
5733
5734         data = pthread_getspecific(key_workspace);
5735         MLX5_ASSERT(data && data->inuse);
5736         if (!data || !data->inuse)
5737                 DRV_LOG(ERR, "flow workspace not initialized.");
5738         return data;
5739 }
5740
5741 /**
5742  * Allocate and init new flow workspace.
5743  *
5744  * @return pointer to flow workspace data, NULL on error.
5745  */
5746 static struct mlx5_flow_workspace*
5747 flow_alloc_thread_workspace(void)
5748 {
5749         struct mlx5_flow_workspace *data = calloc(1, sizeof(*data));
5750
5751         if (!data) {
5752                 DRV_LOG(ERR, "Failed to allocate flow workspace "
5753                         "memory.");
5754                 return NULL;
5755         }
5756         data->rss_desc.queue = calloc(1,
5757                         sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5758         if (!data->rss_desc.queue)
5759                 goto err;
5760         data->rssq_num = MLX5_RSSQ_DEFAULT_NUM;
5761         return data;
5762 err:
5763         if (data->rss_desc.queue)
5764                 free(data->rss_desc.queue);
5765         free(data);
5766         return NULL;
5767 }
5768
5769 /**
5770  * Get new thread specific flow workspace.
5771  *
5772  * If the current workspace is in use, create a new one and set it as current.
5773  *
5774  * @return pointer to thread specific flow workspace data, NULL on error.
5775  */
5776 static struct mlx5_flow_workspace*
5777 mlx5_flow_push_thread_workspace(void)
5778 {
5779         struct mlx5_flow_workspace *curr;
5780         struct mlx5_flow_workspace *data;
5781
5782         if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
5783                 DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
5784                 return NULL;
5785         }
5786         curr = pthread_getspecific(key_workspace);
5787         if (!curr) {
5788                 data = flow_alloc_thread_workspace();
5789                 if (!data)
5790                         return NULL;
5791         } else if (!curr->inuse) {
5792                 data = curr;
5793         } else if (curr->next) {
5794                 data = curr->next;
5795         } else {
5796                 data = flow_alloc_thread_workspace();
5797                 if (!data)
5798                         return NULL;
5799                 curr->next = data;
5800                 data->prev = curr;
5801         }
5802         data->inuse = 1;
5803         data->flow_idx = 0;
5804         /* Set as current workspace */
5805         if (pthread_setspecific(key_workspace, data))
5806                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
5807         return data;
5808 }
5809
5810 /**
5811  * Close current thread specific flow workspace.
5812  *
5813  * If a previous workspace is available, set it as current.
5816  */
5817 static void
5818 mlx5_flow_pop_thread_workspace(void)
5819 {
5820         struct mlx5_flow_workspace *data = mlx5_flow_get_thread_workspace();
5821
5822         if (!data)
5823                 return;
5824         if (!data->inuse) {
5825                 DRV_LOG(ERR, "Failed to close unused flow workspace.");
5826                 return;
5827         }
5828         data->inuse = 0;
5829         if (!data->prev)
5830                 return;
5831         if (pthread_setspecific(key_workspace, data->prev))
5832                 DRV_LOG(ERR, "Failed to set flow workspace to thread.");
5833 }
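
/*
 * Editorial sketch of the intended pairing, as used by flow_list_create()
 * above: one push per flow creation and one pop on every exit path, so
 * re-entrant creations on the same thread each see a private rss_desc.
 *
 *   struct mlx5_flow_workspace *wks = mlx5_flow_push_thread_workspace();
 *
 *   if (!wks)
 *           return 0;
 *   ...fill and use wks->rss_desc...
 *   mlx5_flow_pop_thread_workspace();
 */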
5834
5835 /**
5836  * Verify the flow list is empty.
5837  *
5838  * @param dev
5839  *  Pointer to Ethernet device.
5840  *
5841  * @return the number of flows not released.
5842  */
5843 int
5844 mlx5_flow_verify(struct rte_eth_dev *dev)
5845 {
5846         struct mlx5_priv *priv = dev->data->dev_private;
5847         struct rte_flow *flow;
5848         uint32_t idx;
5849         int ret = 0;
5850
5851         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
5852                       flow, next) {
5853                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
5854                         dev->data->port_id, (void *)flow);
5855                 ++ret;
5856         }
5857         return ret;
5858 }
5859
5860 /**
5861  * Enable default hairpin egress flow.
5862  *
5863  * @param dev
5864  *   Pointer to Ethernet device.
5865  * @param queue
5866  *   The queue index.
5867  *
5868  * @return
5869  *   0 on success, a negative errno value otherwise and rte_errno is set.
5870  */
5871 int
5872 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
5873                             uint32_t queue)
5874 {
5875         struct mlx5_priv *priv = dev->data->dev_private;
5876         const struct rte_flow_attr attr = {
5877                 .egress = 1,
5878                 .priority = 0,
5879         };
5880         struct mlx5_rte_flow_item_tx_queue queue_spec = {
5881                 .queue = queue,
5882         };
5883         struct mlx5_rte_flow_item_tx_queue queue_mask = {
5884                 .queue = UINT32_MAX,
5885         };
5886         struct rte_flow_item items[] = {
5887                 {
5888                         .type = (enum rte_flow_item_type)
5889                                 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
5890                         .spec = &queue_spec,
5891                         .last = NULL,
5892                         .mask = &queue_mask,
5893                 },
5894                 {
5895                         .type = RTE_FLOW_ITEM_TYPE_END,
5896                 },
5897         };
5898         struct rte_flow_action_jump jump = {
5899                 .group = MLX5_HAIRPIN_TX_TABLE,
5900         };
5901         struct rte_flow_action actions[2];
5902         uint32_t flow_idx;
5903         struct rte_flow_error error;
5904
5905         actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
5906         actions[0].conf = &jump;
5907         actions[1].type = RTE_FLOW_ACTION_TYPE_END;
5908         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
5909                                 &attr, items, actions, false, &error);
5910         if (!flow_idx) {
5911                 DRV_LOG(DEBUG,
5912                         "Failed to create ctrl flow: rte_errno(%d),"
5913                         " type(%d), message(%s)",
5914                         rte_errno, error.type,
5915                         error.message ? error.message : " (no stated reason)");
5916                 return -rte_errno;
5917         }
5918         return 0;
5919 }
5920
5921 /**
5922  * Enable a control flow configured from the control plane.
5923  *
5924  * @param dev
5925  *   Pointer to Ethernet device.
5926  * @param eth_spec
5927  *   An Ethernet flow spec to apply.
5928  * @param eth_mask
5929  *   An Ethernet flow mask to apply.
5930  * @param vlan_spec
5931  *   A VLAN flow spec to apply.
5932  * @param vlan_mask
5933  *   A VLAN flow mask to apply.
5934  *
5935  * @return
5936  *   0 on success, a negative errno value otherwise and rte_errno is set.
5937  */
5938 int
5939 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
5940                     struct rte_flow_item_eth *eth_spec,
5941                     struct rte_flow_item_eth *eth_mask,
5942                     struct rte_flow_item_vlan *vlan_spec,
5943                     struct rte_flow_item_vlan *vlan_mask)
5944 {
5945         struct mlx5_priv *priv = dev->data->dev_private;
5946         const struct rte_flow_attr attr = {
5947                 .ingress = 1,
5948                 .priority = MLX5_FLOW_PRIO_RSVD,
5949         };
5950         struct rte_flow_item items[] = {
5951                 {
5952                         .type = RTE_FLOW_ITEM_TYPE_ETH,
5953                         .spec = eth_spec,
5954                         .last = NULL,
5955                         .mask = eth_mask,
5956                 },
5957                 {
5958                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
5959                                               RTE_FLOW_ITEM_TYPE_END,
5960                         .spec = vlan_spec,
5961                         .last = NULL,
5962                         .mask = vlan_mask,
5963                 },
5964                 {
5965                         .type = RTE_FLOW_ITEM_TYPE_END,
5966                 },
5967         };
5968         uint16_t queue[priv->reta_idx_n];
5969         struct rte_flow_action_rss action_rss = {
5970                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
5971                 .level = 0,
5972                 .types = priv->rss_conf.rss_hf,
5973                 .key_len = priv->rss_conf.rss_key_len,
5974                 .queue_num = priv->reta_idx_n,
5975                 .key = priv->rss_conf.rss_key,
5976                 .queue = queue,
5977         };
5978         struct rte_flow_action actions[] = {
5979                 {
5980                         .type = RTE_FLOW_ACTION_TYPE_RSS,
5981                         .conf = &action_rss,
5982                 },
5983                 {
5984                         .type = RTE_FLOW_ACTION_TYPE_END,
5985                 },
5986         };
5987         uint32_t flow_idx;
5988         struct rte_flow_error error;
5989         unsigned int i;
5990
5991         if (!priv->reta_idx_n || !priv->rxqs_n)
5992                 return 0;
5994         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
5995                 action_rss.types = 0;
5996         for (i = 0; i != priv->reta_idx_n; ++i)
5997                 queue[i] = (*priv->reta_idx)[i];
5998         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
5999                                 &attr, items, actions, false, &error);
6000         if (!flow_idx)
6001                 return -rte_errno;
6002         return 0;
6003 }
6004
6005 /**
6006  * Enable a control flow configured from the control plane.
6007  *
6008  * @param dev
6009  *   Pointer to Ethernet device.
6010  * @param eth_spec
6011  *   An Ethernet flow spec to apply.
6012  * @param eth_mask
6013  *   An Ethernet flow mask to apply.
6014  *
6015  * @return
6016  *   0 on success, a negative errno value otherwise and rte_errno is set.
6017  */
6018 int
6019 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6020                struct rte_flow_item_eth *eth_spec,
6021                struct rte_flow_item_eth *eth_mask)
6022 {
6023         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6024 }
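
/*
 * Editorial sketch: the control path typically calls this wrapper to
 * enable e.g. broadcast reception; the exact specs are built by the
 * trigger code, the layout below is illustrative:
 *
 *   struct rte_flow_item_eth bcast = {
 *           .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *   };
 *
 *   mlx5_ctrl_flow(dev, &bcast, &bcast);
 */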
6025
6026 /**
6027  * Create a default miss flow rule matching LACP traffic.
6028  *
6029  * @param dev
6030  *   Pointer to Ethernet device.
6033  *
6034  * @return
6035  *   0 on success, a negative errno value otherwise and rte_errno is set.
6036  */
6037 int
6038 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6039 {
6040         struct mlx5_priv *priv = dev->data->dev_private;
6041         /*
6042          * The LACP matching is done using only the ether type, since a
6043          * multicast dst MAC makes the kernel give this flow a low priority.
6044          */
6045         static const struct rte_flow_item_eth lacp_spec = {
6046                 .type = RTE_BE16(0x8809),
6047         };
6048         static const struct rte_flow_item_eth lacp_mask = {
6049                 .type = 0xffff,
6050         };
6051         const struct rte_flow_attr attr = {
6052                 .ingress = 1,
6053         };
6054         struct rte_flow_item items[] = {
6055                 {
6056                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6057                         .spec = &lacp_spec,
6058                         .mask = &lacp_mask,
6059                 },
6060                 {
6061                         .type = RTE_FLOW_ITEM_TYPE_END,
6062                 },
6063         };
6064         struct rte_flow_action actions[] = {
6065                 {
6066                         .type = (enum rte_flow_action_type)
6067                                 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6068                 },
6069                 {
6070                         .type = RTE_FLOW_ACTION_TYPE_END,
6071                 },
6072         };
6073         struct rte_flow_error error;
6074         uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6075                                 &attr, items, actions, false, &error);
6076
6077         if (!flow_idx)
6078                 return -rte_errno;
6079         return 0;
6080 }
6081
6082 /**
6083  * Destroy a flow.
6084  *
6085  * @see rte_flow_destroy()
6086  * @see rte_flow_ops
6087  */
6088 int
6089 mlx5_flow_destroy(struct rte_eth_dev *dev,
6090                   struct rte_flow *flow,
6091                   struct rte_flow_error *error __rte_unused)
6092 {
6093         struct mlx5_priv *priv = dev->data->dev_private;
6094
6095         flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6096         return 0;
6097 }
6098
6099 /**
6100  * Destroy all flows.
6101  *
6102  * @see rte_flow_flush()
6103  * @see rte_flow_ops
6104  */
6105 int
6106 mlx5_flow_flush(struct rte_eth_dev *dev,
6107                 struct rte_flow_error *error __rte_unused)
6108 {
6109         struct mlx5_priv *priv = dev->data->dev_private;
6110
6111         mlx5_flow_list_flush(dev, &priv->flows, false);
6112         return 0;
6113 }
6114
6115 /**
6116  * Isolated mode.
6117  *
6118  * @see rte_flow_isolate()
6119  * @see rte_flow_ops
6120  */
6121 int
6122 mlx5_flow_isolate(struct rte_eth_dev *dev,
6123                   int enable,
6124                   struct rte_flow_error *error)
6125 {
6126         struct mlx5_priv *priv = dev->data->dev_private;
6127
6128         if (dev->data->dev_started) {
6129                 rte_flow_error_set(error, EBUSY,
6130                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6131                                    NULL,
6132                                    "port must be stopped first");
6133                 return -rte_errno;
6134         }
6135         priv->isolated = !!enable;
6136         if (enable)
6137                 dev->dev_ops = &mlx5_os_dev_ops_isolate;
6138         else
6139                 dev->dev_ops = &mlx5_os_dev_ops;
6140
6141         dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6142         dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6143
6144         return 0;
6145 }
6146
6147 /**
6148  * Query a flow.
6149  *
6150  * @see rte_flow_query()
6151  * @see rte_flow_ops
6152  */
6153 static int
6154 flow_drv_query(struct rte_eth_dev *dev,
6155                uint32_t flow_idx,
6156                const struct rte_flow_action *actions,
6157                void *data,
6158                struct rte_flow_error *error)
6159 {
6160         struct mlx5_priv *priv = dev->data->dev_private;
6161         const struct mlx5_flow_driver_ops *fops;
6162         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6163                                                [MLX5_IPOOL_RTE_FLOW],
6164                                                flow_idx);
6165         enum mlx5_flow_drv_type ftype;
6166
6167         if (!flow) {
6168                 return rte_flow_error_set(error, ENOENT,
6169                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6170                           NULL,
6171                           "invalid flow handle");
6172         }
6173         ftype = flow->drv_type;
6174         MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6175         fops = flow_get_drv_ops(ftype);
6176
6177         return fops->query(dev, flow, actions, data, error);
6178 }
6179
6180 /**
6181  * Query a flow.
6182  *
6183  * @see rte_flow_query()
6184  * @see rte_flow_ops
6185  */
6186 int
6187 mlx5_flow_query(struct rte_eth_dev *dev,
6188                 struct rte_flow *flow,
6189                 const struct rte_flow_action *actions,
6190                 void *data,
6191                 struct rte_flow_error *error)
6192 {
6193         int ret;
6194
6195         ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6196                              error);
6197         if (ret < 0)
6198                 return ret;
6199         return 0;
6200 }
6201
6202 /**
6203  * Manage filter operations.
6204  *
6205  * @param dev
6206  *   Pointer to Ethernet device structure.
6207  * @param filter_type
6208  *   Filter type.
6209  * @param filter_op
6210  *   Operation to perform.
6211  * @param arg
6212  *   Pointer to operation-specific structure.
6213  *
6214  * @return
6215  *   0 on success, a negative errno value otherwise and rte_errno is set.
6216  */
6217 int
6218 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
6219                      enum rte_filter_type filter_type,
6220                      enum rte_filter_op filter_op,
6221                      void *arg)
6222 {
6223         switch (filter_type) {
6224         case RTE_ETH_FILTER_GENERIC:
6225                 if (filter_op != RTE_ETH_FILTER_GET) {
6226                         rte_errno = EINVAL;
6227                         return -rte_errno;
6228                 }
6229                 *(const void **)arg = &mlx5_flow_ops;
6230                 return 0;
6231         default:
6232                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
6233                         dev->data->port_id, filter_type);
6234                 rte_errno = ENOTSUP;
6235                 return -rte_errno;
6236         }
6237         return 0;
6238 }
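
/*
 * Editorial sketch: an application obtains the generic flow ops through
 * the legacy filter-control API (port id hypothetical); this is also the
 * path the rte_flow layer itself uses:
 *
 *   const void *ops = NULL;
 *
 *   rte_eth_dev_filter_ctrl(0, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 */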
6239
6240 /**
6241  * Create the needed meter and suffix tables.
6242  *
6243  * @param[in] dev
6244  *   Pointer to Ethernet device.
6245  * @param[in] fm
6246  *   Pointer to the flow meter.
6247  *
6248  * @return
6249  *   Pointer to table set on success, NULL otherwise.
6250  */
6251 struct mlx5_meter_domains_infos *
6252 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
6253                           const struct mlx5_flow_meter *fm)
6254 {
6255         const struct mlx5_flow_driver_ops *fops;
6256
6257         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6258         return fops->create_mtr_tbls(dev, fm);
6259 }
6260
6261 /**
6262  * Destroy the meter table set.
6263  *
6264  * @param[in] dev
6265  *   Pointer to Ethernet device.
6266  * @param[in] tbl
6267  *   Pointer to the meter table set.
6268  *
6269  * @return
6270  *   0 on success.
6271  */
6272 int
6273 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
6274                            struct mlx5_meter_domains_infos *tbls)
6275 {
6276         const struct mlx5_flow_driver_ops *fops;
6277
6278         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6279         return fops->destroy_mtr_tbls(dev, tbls);
6280 }
6281
6282 /**
6283  * Create policer rules.
6284  *
6285  * @param[in] dev
6286  *   Pointer to Ethernet device.
6287  * @param[in] fm
6288  *   Pointer to flow meter structure.
6289  * @param[in] attr
6290  *   Pointer to flow attributes.
6291  *
6292  * @return
6293  *   0 on success, -1 otherwise.
6294  */
6295 int
6296 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
6297                                struct mlx5_flow_meter *fm,
6298                                const struct rte_flow_attr *attr)
6299 {
6300         const struct mlx5_flow_driver_ops *fops;
6301
6302         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6303         return fops->create_policer_rules(dev, fm, attr);
6304 }
6305
6306 /**
6307  * Destroy policer rules.
6308  *
6309  * @param[in] dev
6310  *   Pointer to Ethernet device.
6311  * @param[in] fm
6310  *   Pointer to flow meter structure.
6311  * @param[in] attr
6312  *   Pointer to flow attributes.
6313  *
6314  * @return
6315  *   0 on success, -1 otherwise.
6316  */
6317 int
6318 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
6319                                 struct mlx5_flow_meter *fm,
6320                                 const struct rte_flow_attr *attr)
6321 {
6322         const struct mlx5_flow_driver_ops *fops;
6323
6324         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6325         return fops->destroy_policer_rules(dev, fm, attr);
6326 }
6327
6328 /**
6329  * Allocate a counter.
6330  *
6331  * @param[in] dev
6332  *   Pointer to Ethernet device structure.
6333  *
6334  * @return
6335  *   Index of the allocated counter on success, 0 otherwise.
6336  */
6337 uint32_t
6338 mlx5_counter_alloc(struct rte_eth_dev *dev)
6339 {
6340         const struct mlx5_flow_driver_ops *fops;
6341         struct rte_flow_attr attr = { .transfer = 0 };
6342
6343         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6344                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6345                 return fops->counter_alloc(dev);
6346         }
6347         DRV_LOG(ERR,
6348                 "port %u counter allocate is not supported.",
6349                  dev->data->port_id);
6350         return 0;
6351 }
6352
6353 /**
6354  * Free a counter.
6355  *
6356  * @param[in] dev
6357  *   Pointer to Ethernet device structure.
6358  * @param[in] cnt
6359  *   Index of the counter to be freed.
6360  */
6361 void
6362 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
6363 {
6364         const struct mlx5_flow_driver_ops *fops;
6365         struct rte_flow_attr attr = { .transfer = 0 };
6366
6367         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6368                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6369                 fops->counter_free(dev, cnt);
6370                 return;
6371         }
6372         DRV_LOG(ERR,
6373                 "port %u counter free is not supported.",
6374                  dev->data->port_id);
6375 }
6376
6377 /**
6378  * Query counter statistics.
6379  *
6380  * @param[in] dev
6381  *   Pointer to Ethernet device structure.
6382  * @param[in] cnt
6383  *   Index of the counter to query.
6384  * @param[in] clear
6385  *   Set to clear counter statistics.
6386  * @param[out] pkts
6387  *   Where the number of packets that hit the counter is saved.
6388  * @param[out] bytes
6389  *   Where the number of bytes that hit the counter is saved.
6390  *
6391  * @return
6392  *   0 on success, a negative errno value otherwise.
6393  */
6394 int
6395 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
6396                    bool clear, uint64_t *pkts, uint64_t *bytes)
6397 {
6398         const struct mlx5_flow_driver_ops *fops;
6399         struct rte_flow_attr attr = { .transfer = 0 };
6400
6401         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6402                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6403                 return fops->counter_query(dev, cnt, clear, pkts, bytes);
6404         }
6405         DRV_LOG(ERR,
6406                 "port %u counter query is not supported.",
6407                  dev->data->port_id);
6408         return -ENOTSUP;
6409 }
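
/*
 * Editorial sketch tying the three counter wrappers above together; it
 * assumes the DV driver is in use so the calls do not fall through to the
 * unsupported path:
 *
 *   uint64_t pkts = 0, bytes = 0;
 *   uint32_t cnt = mlx5_counter_alloc(dev);
 *
 *   if (cnt && !mlx5_counter_query(dev, cnt, false, &pkts, &bytes))
 *           ...consume pkts / bytes...
 *   if (cnt)
 *           mlx5_counter_free(dev, cnt);
 */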
6410
6411 /**
6412  * Allocate new memory for the counter values, wrapped by all the needed
6413  * management structures.
6414  *
6415  * @param[in] sh
6416  *   Pointer to mlx5_dev_ctx_shared object.
6417  *
6418  * @return
6419  *   0 on success, a negative errno value otherwise.
6420  */
6421 static int
6422 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
6423 {
6424         struct mlx5_devx_mkey_attr mkey_attr;
6425         struct mlx5_counter_stats_mem_mng *mem_mng;
6426         volatile struct flow_counter_stats *raw_data;
6427         int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
6428         int size = (sizeof(struct flow_counter_stats) *
6429                         MLX5_COUNTERS_PER_POOL +
6430                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
6431                         sizeof(struct mlx5_counter_stats_mem_mng);
6432         size_t pgsize = rte_mem_page_size();
6433         uint8_t *mem;
6434         int i;
6435
6436         if (pgsize == (size_t)-1) {
6437                 DRV_LOG(ERR, "Failed to get mem page size");
6438                 rte_errno = ENOMEM;
6439                 return -ENOMEM;
6440         }
6441         mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
6442         if (!mem) {
6443                 rte_errno = ENOMEM;
6444                 return -ENOMEM;
6445         }
6446         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
6447         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
6448         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
6449                                                  IBV_ACCESS_LOCAL_WRITE);
6450         if (!mem_mng->umem) {
6451                 rte_errno = errno;
6452                 mlx5_free(mem);
6453                 return -rte_errno;
6454         }
6455         mkey_attr.addr = (uintptr_t)mem;
6456         mkey_attr.size = size;
6457         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
6458         mkey_attr.pd = sh->pdn;
6459         mkey_attr.log_entity_size = 0;
6460         mkey_attr.pg_access = 0;
6461         mkey_attr.klm_array = NULL;
6462         mkey_attr.klm_num = 0;
6463         mkey_attr.relaxed_ordering_write = sh->cmng.relaxed_ordering_write;
6464         mkey_attr.relaxed_ordering_read = sh->cmng.relaxed_ordering_read;
6465         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
6466         if (!mem_mng->dm) {
6467                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
6468                 rte_errno = errno;
6469                 mlx5_free(mem);
6470                 return -rte_errno;
6471         }
6472         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
6473         raw_data = (volatile struct flow_counter_stats *)mem;
6474         for (i = 0; i < raws_n; ++i) {
6475                 mem_mng->raws[i].mem_mng = mem_mng;
6476                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
6477         }
6478         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
6479                 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
6480                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
6481                                  next);
6482         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
6483         sh->cmng.mem_mng = mem_mng;
6484         return 0;
6485 }
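
/*
 * The single allocation above packs three regions back to back. A
 * sketch of the resulting layout, with `data_size` and `total_size`
 * standing for the registered size and the full allocation size
 * computed above (both names are illustrative only):
 */
#if 0	/* layout sketch only, not compiled */
	/* [mem, mem + data_size): raw counter values written by the HW. */
	volatile struct flow_counter_stats *raw_data = (void *)mem;
	/* [mem + data_size, ...): the raws_n raw descriptors. */
	struct mlx5_counter_stats_raw *raws = (void *)(mem + data_size);
	/* Tail: the management header, recovered by pointer arithmetic. */
	struct mlx5_counter_stats_mem_mng *mng =
		(struct mlx5_counter_stats_mem_mng *)(mem + total_size) - 1;
#endif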
6486
6487 /**
6488  * Set the statistic memory to the new counter pool.
6489  *
6490  * @param[in] sh
6491  *   Pointer to mlx5_dev_ctx_shared object.
6492  * @param[in] pool
6493  *   Pointer to the pool to set the statistic memory.
6494  *
6495  * @return
6496  *   0 on success, a negative errno value otherwise.
6497  */
6498 static int
6499 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
6500                                struct mlx5_flow_counter_pool *pool)
6501 {
6502         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6503         /* Resize the statistic memory once the current chunk is used up. */
6504         if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
6505             mlx5_flow_create_counter_stat_mem_mng(sh)) {
6506                 DRV_LOG(ERR, "Cannot resize counter stat mem.");
6507                 return -1;
6508         }
6509         rte_spinlock_lock(&pool->sl);
6510         pool->raw = cmng->mem_mng->raws + pool->index %
6511                     MLX5_CNT_CONTAINER_RESIZE;
6512         rte_spinlock_unlock(&pool->sl);
6513         pool->raw_hw = NULL;
6514         return 0;
6515 }
6516
6517 #define MLX5_POOL_QUERY_FREQ_US 1000000
6518
6519 /**
6520  * Set the periodic procedure for triggering asynchronous batch queries for all
6521  * the counter pools.
6522  *
6523  * @param[in] sh
6524  *   Pointer to mlx5_dev_ctx_shared object.
6525  */
6526 void
6527 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
6528 {
6529         uint32_t pools_n, us;
6530
6531         pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
6532         us = MLX5_POOL_QUERY_FREQ_US / pools_n;
6533         DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
6534         if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
6535                 sh->cmng.query_thread_on = 0;
6536                 DRV_LOG(ERR, "Cannot reinitialize query alarm");
6537         } else {
6538                 sh->cmng.query_thread_on = 1;
6539         }
6540 }
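
/*
 * Spacing example for the alarm above: the 1 s budget set by
 * MLX5_POOL_QUERY_FREQ_US is divided evenly among the valid pools, so
 * with e.g. 4 pools one query fires every 250000 us and each pool is
 * still refreshed about once per second regardless of the pool count.
 */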
6541
6542 /**
6543  * The periodic procedure for triggering asynchronous batch queries for all the
6544  * counter pools. This function is expected to be called from the host thread.
6545  *
6546  * @param[in] arg
6547  *   The parameter for the alarm process.
6548  */
6549 void
6550 mlx5_flow_query_alarm(void *arg)
6551 {
6552         struct mlx5_dev_ctx_shared *sh = arg;
6553         int ret;
6554         uint16_t pool_index = sh->cmng.pool_index;
6555         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6556         struct mlx5_flow_counter_pool *pool;
6557         uint16_t n_valid;
6558
6559         if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
6560                 goto set_alarm;
6561         rte_spinlock_lock(&cmng->pool_update_sl);
6562         pool = cmng->pools[pool_index];
6563         n_valid = cmng->n_valid;
6564         rte_spinlock_unlock(&cmng->pool_update_sl);
6565         /* Set the statistic memory to the newly created pool. */
6566         if (!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))
6567                 goto set_alarm;
6568         if (pool->raw_hw)
6569                 /* There is a pool query in progress. */
6570                 goto set_alarm;
6571         pool->raw_hw =
6572                 LIST_FIRST(&sh->cmng.free_stat_raws);
6573         if (!pool->raw_hw)
6574                 /* No free counter statistics raw memory. */
6575                 goto set_alarm;
6576         /*
6577          * Bump the generation so the counters released between the query
6578          * trigger and the query completion can be identified efficiently.
6579          * A counter released in this gap must wait for the next query
6580          * round, as packets that arrived meanwhile are not yet accounted.
6581          */
6582         pool->query_gen++;
6583         ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
6584                                                MLX5_COUNTERS_PER_POOL,
6585                                                NULL, NULL,
6586                                                pool->raw_hw->mem_mng->dm->id,
6587                                                (void *)(uintptr_t)
6588                                                pool->raw_hw->data,
6589                                                sh->devx_comp,
6590                                                (uint64_t)(uintptr_t)pool);
6591         if (ret) {
6592                 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
6593                         " %d", pool->min_dcs->id);
6594                 pool->raw_hw = NULL;
6595                 goto set_alarm;
6596         }
6597         LIST_REMOVE(pool->raw_hw, next);
6598         sh->cmng.pending_queries++;
6599         pool_index++;
6600         if (pool_index >= n_valid)
6601                 pool_index = 0;
6602 set_alarm:
6603         sh->cmng.pool_index = pool_index;
6604         mlx5_set_query_alarm(sh);
6605 }
6606
6607 /**
6608  * Check the counter pool for newly aged flows and raise the aging event.
6609  *
6610  * @param[in] sh
6611  *   Pointer to mlx5_dev_ctx_shared object.
6612  * @param[in] pool
6613  *   Pointer to the current counter pool.
6614  */
6615 static void
6616 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
6617                    struct mlx5_flow_counter_pool *pool)
6618 {
6619         struct mlx5_priv *priv;
6620         struct mlx5_flow_counter *cnt;
6621         struct mlx5_age_info *age_info;
6622         struct mlx5_age_param *age_param;
6623         struct mlx5_counter_stats_raw *cur = pool->raw_hw;
6624         struct mlx5_counter_stats_raw *prev = pool->raw;
6625         const uint64_t curr_time = MLX5_CURR_TIME_SEC;
6626         const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
6627         uint16_t expected = AGE_CANDIDATE;
6628         uint32_t i;
6629
6630         pool->time_of_last_age_check = curr_time;
6631         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
6632                 cnt = MLX5_POOL_GET_CNT(pool, i);
6633                 age_param = MLX5_CNT_TO_AGE(cnt);
6634                 if (__atomic_load_n(&age_param->state,
6635                                     __ATOMIC_RELAXED) != AGE_CANDIDATE)
6636                         continue;
6637                 if (cur->data[i].hits != prev->data[i].hits) {
6638                         __atomic_store_n(&age_param->sec_since_last_hit, 0,
6639                                          __ATOMIC_RELAXED);
6640                         continue;
6641                 }
6642                 if (__atomic_add_fetch(&age_param->sec_since_last_hit,
6643                                        time_delta,
6644                                        __ATOMIC_RELAXED) <= age_param->timeout)
6645                         continue;
6646                 /*
6647                  * Hold the lock first; otherwise, if the release
6648                  * happens between setting AGE_TMOUT and the tailq
6649                  * insertion, the release procedure may delete a
6650                  * non-existent tailq node.
6651                  */
6652                 priv = rte_eth_devices[age_param->port_id].data->dev_private;
6653                 age_info = GET_PORT_AGE_INFO(priv);
6654                 rte_spinlock_lock(&age_info->aged_sl);
6655                 if (__atomic_compare_exchange_n(&age_param->state, &expected,
6656                                                 AGE_TMOUT, false,
6657                                                 __ATOMIC_RELAXED,
6658                                                 __ATOMIC_RELAXED)) {
6659                         TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
6660                         MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
6661                 }
6662                 rte_spinlock_unlock(&age_info->aged_sl);
6663         }
6664         mlx5_age_event_prepare(sh);
6665 }
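
/*
 * The per-counter decision taken in the loop above, restated as a
 * sketch; the atomics are elided and the fields mirror the driver's
 * (times in seconds).
 */
#if 0	/* decision sketch only, not compiled */
	if (cur->data[i].hits != prev->data[i].hits)
		sec_since_last_hit = 0;		/* traffic seen, reset age */
	else if ((sec_since_last_hit += time_delta) > timeout)
		/* AGE_CANDIDATE -> AGE_TMOUT under aged_sl, then the
		 * counter is queued on aged_counters. */
		;
#endif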
6666
6667 /**
6668  * Handler for the HW response carrying ready values from an asynchronous
6669  * batch query. This function is expected to be called from the host thread.
6670  *
6671  * @param[in] sh
6672  *   The pointer to the shared device context.
6673  * @param[in] async_id
6674  *   The Devx async ID.
6675  * @param[in] status
6676  *   The status of the completion.
6677  */
6678 void
6679 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
6680                                   uint64_t async_id, int status)
6681 {
6682         struct mlx5_flow_counter_pool *pool =
6683                 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
6684         struct mlx5_counter_stats_raw *raw_to_free;
6685         uint8_t query_gen = pool->query_gen ^ 1;
6686         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
6687         enum mlx5_counter_type cnt_type =
6688                 pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
6689                                 MLX5_COUNTER_TYPE_ORIGIN;
6690
6691         if (unlikely(status)) {
6692                 raw_to_free = pool->raw_hw;
6693         } else {
6694                 raw_to_free = pool->raw;
6695                 if (pool->is_aged)
6696                         mlx5_flow_aging_check(sh, pool);
6697                 rte_spinlock_lock(&pool->sl);
6698                 pool->raw = pool->raw_hw;
6699                 rte_spinlock_unlock(&pool->sl);
6700                 /* Be sure the new raw counters data is updated in memory. */
6701                 rte_io_wmb();
6702                 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
6703                         rte_spinlock_lock(&cmng->csl[cnt_type]);
6704                         TAILQ_CONCAT(&cmng->counters[cnt_type],
6705                                      &pool->counters[query_gen], next);
6706                         rte_spinlock_unlock(&cmng->csl[cnt_type]);
6707                 }
6708         }
6709         LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
6710         pool->raw_hw = NULL;
6711         sh->cmng.pending_queries--;
6712 }
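
/*
 * The pool double-buffers its statistics: `raw` holds the last
 * completed snapshot and `raw_hw` the in-flight one; on success they
 * swap and the stale buffer returns to free_stat_raws. The sketch
 * below shows why `query_gen ^ 1` is used above.
 */
#if 0	/* generation sketch only, not compiled */
	uint8_t done_gen = pool->query_gen ^ 1;	/* fully queried gen */
	/* counters[done_gen]  -> global free list, values are final. */
	/* counters[query_gen] -> must wait for the next completion.  */
#endif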
6713
6714 static int
6715 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
6716                     const struct flow_grp_info *grp_info,
6717                     struct rte_flow_error *error)
6718 {
6719         if (grp_info->transfer && grp_info->external &&
6720             grp_info->fdb_def_rule) {
6721                 if (group == UINT32_MAX)
6722                         return rte_flow_error_set
6723                                                 (error, EINVAL,
6724                                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
6725                                                  NULL,
6726                                                  "group index not supported");
6727                 *table = group + 1;
6728         } else {
6729                 *table = group;
6730         }
6731         DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
6732         return 0;
6733 }
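
/*
 * Example of the FDB shift above, assuming transfer, external and
 * fdb_def_rule are all set: application group 0 maps to table 1
 * (table 0 is taken by the default FDB rule), group 5 maps to
 * table 6, and UINT32_MAX is rejected since the +1 shift would wrap.
 */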
6734
6735 /**
6736  * Translate the rte_flow group index to HW table value.
6737  *
6738  * If tunnel offload is disabled, all group ids are converted to flow
6739  * table ids using the standard method.
6740  * If tunnel offload is enabled, a group id can be converted using either
6741  * the standard or the tunnel conversion method. The method is selected
6742  * according to the flags in the `grp_info` parameter:
6743  * - Internal (grp_info.external == 0) groups are converted with the
6744  *   standard method.
6745  * - Group ids in the JUMP action are converted with the tunnel method.
6746  * - Conversion of the group id in the rule attribute depends on the rule
6747  *   type and the group id value:
6748  *   ** non-zero group attributes are converted with the tunnel method
6749  *   ** a zero group attribute in a non-tunnel rule is converted with the
6750  *      standard method - there is only one root table
6751  *   ** a zero group attribute in a steer tunnel rule is converted with
6752  *      the standard method - single root table
6753  *   ** a zero group attribute in a match tunnel rule is a special OvS
6754  *      case: that value is used for portability reasons, and that group
6755  *      id is converted with the tunnel conversion method.
6756  *
6757  * @param[in] dev
6758  *   Port device
6759  * @param[in] tunnel
6760  *   PMD tunnel offload object
6761  * @param[in] group
6762  *   rte_flow group index value.
6763  * @param[out] table
6764  *   HW table value.
6765  * @param[in] grp_info
6766  *   flags used for conversion
6767  * @param[out] error
6768  *   Pointer to error structure.
6769  *
6770  * @return
6771  *   0 on success, a negative errno value otherwise and rte_errno is set.
6772  */
6773 int
6774 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
6775                          const struct mlx5_flow_tunnel *tunnel,
6776                          uint32_t group, uint32_t *table,
6777                          const struct flow_grp_info *grp_info,
6778                          struct rte_flow_error *error)
6779 {
6780         int ret;
6781         bool standard_translation;
6782
6783         if (!grp_info->skip_scale && grp_info->external &&
6784             group < MLX5_MAX_TABLES_EXTERNAL)
6785                 group *= MLX5_FLOW_TABLE_FACTOR;
6786         if (is_tunnel_offload_active(dev)) {
6787                 standard_translation = !grp_info->external ||
6788                                         grp_info->std_tbl_fix;
6789         } else {
6790                 standard_translation = true;
6791         }
6792         DRV_LOG(DEBUG,
6793                 "port %u group=%u transfer=%d external=%d fdb_def_rule=%d translate=%s",
6794                 dev->data->port_id, group, grp_info->transfer,
6795                 grp_info->external, grp_info->fdb_def_rule,
6796                 standard_translation ? "STANDARD" : "TUNNEL");
6797         if (standard_translation)
6798                 ret = flow_group_to_table(dev->data->port_id, group, table,
6799                                           grp_info, error);
6800         else
6801                 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
6802                                                       table, error);
6803
6804         return ret;
6805 }
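
/*
 * Worked example for the scaling step above, with an illustrative
 * MLX5_FLOW_TABLE_FACTOR of 4: external group 2 becomes table group 8
 * before either translation runs, leaving a gap of table ids between
 * adjacent application groups for internally created subflow tables.
 */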
6806
6807 /**
6808  * Discover availability of metadata reg_c's.
6809  *
6810  * Iteratively use test flows to check availability.
6811  *
6812  * @param[in] dev
6813  *   Pointer to the Ethernet device structure.
6814  *
6815  * @return
6816  *   0 on success, a negative errno value otherwise and rte_errno is set.
6817  */
6818 int
6819 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
6820 {
6821         struct mlx5_priv *priv = dev->data->dev_private;
6822         struct mlx5_dev_config *config = &priv->config;
6823         enum modify_reg idx;
6824         int n = 0;
6825
6826         /* reg_c[0] and reg_c[1] are reserved. */
6827         config->flow_mreg_c[n++] = REG_C_0;
6828         config->flow_mreg_c[n++] = REG_C_1;
6829         /* Discover availability of other reg_c's. */
6830         for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
6831                 struct rte_flow_attr attr = {
6832                         .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
6833                         .priority = MLX5_FLOW_PRIO_RSVD,
6834                         .ingress = 1,
6835                 };
6836                 struct rte_flow_item items[] = {
6837                         [0] = {
6838                                 .type = RTE_FLOW_ITEM_TYPE_END,
6839                         },
6840                 };
6841                 struct rte_flow_action actions[] = {
6842                         [0] = {
6843                                 .type = (enum rte_flow_action_type)
6844                                         MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
6845                                 .conf = &(struct mlx5_flow_action_copy_mreg){
6846                                         .src = REG_C_1,
6847                                         .dst = idx,
6848                                 },
6849                         },
6850                         [1] = {
6851                                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
6852                                 .conf = &(struct rte_flow_action_jump){
6853                                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
6854                                 },
6855                         },
6856                         [2] = {
6857                                 .type = RTE_FLOW_ACTION_TYPE_END,
6858                         },
6859                 };
6860                 uint32_t flow_idx;
6861                 struct rte_flow *flow;
6862                 struct rte_flow_error error;
6863
6864                 if (!config->dv_flow_en)
6865                         break;
6866                 /* Create internal flow, validation skips copy action. */
6867                 flow_idx = flow_list_create(dev, NULL, &attr, items,
6868                                             actions, false, &error);
6869                 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
6870                                       flow_idx);
6871                 if (!flow)
6872                         continue;
6873                 config->flow_mreg_c[n++] = idx;
6874                 flow_list_destroy(dev, NULL, flow_idx);
6875         }
6876         for (; n < MLX5_MREG_C_NUM; ++n)
6877                 config->flow_mreg_c[n] = REG_NON;
6878         return 0;
6879 }
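
/*
 * After discovery, config->flow_mreg_c[] reads e.g.
 * { REG_C_0, REG_C_1, REG_C_2, REG_NON, ... }: the first REG_NON
 * entry terminates the list of usable registers, which is how the
 * rest of the driver probes for extended metadata support.
 */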
6880
6881 /**
6882  * Dump raw HW flow data to a file.
6883  *
6884  * @param[in] dev
6885  *    The pointer to Ethernet device.
6886  * @param[in] file
6887  *   A pointer to a file for output.
6888  * @param[out] error
6889  *   Perform verbose error reporting if not NULL. PMDs initialize this
6890  *   structure in case of error only.
6891  * @return
6892  *   0 on success, a negative value otherwise.
6893  */
6894 int
6895 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
6896                    FILE *file,
6897                    struct rte_flow_error *error __rte_unused)
6898 {
6899         struct mlx5_priv *priv = dev->data->dev_private;
6900         struct mlx5_dev_ctx_shared *sh = priv->sh;
6901
6902         if (!priv->config.dv_flow_en) {
6903                 if (fputs("device dv flow disabled\n", file) <= 0)
6904                         return -errno;
6905                 return -ENOTSUP;
6906         }
6907         return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
6908                                        sh->tx_domain, file);
6909 }
6910
6911 /**
6912  * Get aged-out flows.
6913  *
6914  * @param[in] dev
6915  *   Pointer to the Ethernet device structure.
6916  * @param[in] contexts
6917  *   The address of an array of pointers to the aged-out flow contexts.
6918  * @param[in] nb_contexts
6919  *   The length of the context array.
6920  * @param[out] error
6921  *   Perform verbose error reporting if not NULL. Initialized in case of
6922  *   error only.
6923  *
6924  * @return
6925  *   The number of contexts returned on success, a negative errno value
6926  *   otherwise. If nb_contexts is 0, the total number of aged contexts is
6927  *   returned; otherwise, the number of aged flows reported in the
6928  *   context array.
6929  */
6930 int
6931 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
6932                         uint32_t nb_contexts, struct rte_flow_error *error)
6933 {
6934         const struct mlx5_flow_driver_ops *fops;
6935         struct rte_flow_attr attr = { .transfer = 0 };
6936
6937         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6938                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6939                 return fops->get_aged_flows(dev, contexts, nb_contexts,
6940                                                     error);
6941         }
6942         DRV_LOG(ERR,
6943                 "port %u get aged flows is not supported.",
6944                  dev->data->port_id);
6945         return -ENOTSUP;
6946 }
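
/*
 * Caller-side sketch of the two-step pattern implied by the return
 * convention above, via the public rte_flow API that dispatches to
 * this driver op; variables are hypothetical and errors unchecked.
 */
#if 0	/* illustrative sketch only, not compiled */
	struct rte_flow_error error;
	void **ctxs;
	int n;

	n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);	/* count */
	ctxs = malloc(n * sizeof(*ctxs));
	n = rte_flow_get_aged_flows(port_id, ctxs, n, &error);	/* fetch */
#endif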
6947
6948 /* Wrapper for driver action_validate op callback */
6949 static int
6950 flow_drv_action_validate(struct rte_eth_dev *dev,
6951                          const struct rte_flow_shared_action_conf *conf,
6952                          const struct rte_flow_action *action,
6953                          const struct mlx5_flow_driver_ops *fops,
6954                          struct rte_flow_error *error)
6955 {
6956         static const char err_msg[] = "shared action validation unsupported";
6957
6958         if (!fops->action_validate) {
6959                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
6960                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
6961                                    NULL, err_msg);
6962                 return -rte_errno;
6963         }
6964         return fops->action_validate(dev, conf, action, error);
6965 }
6966
6967 /**
6968  * Destroys the shared action by handle.
6969  *
6970  * @param dev
6971  *   Pointer to Ethernet device structure.
6972  * @param[in] action
6973  *   Handle for the shared action to be destroyed.
6974  * @param[out] error
6975  *   Perform verbose error reporting if not NULL. PMDs initialize this
6976  *   structure in case of error only.
6977  *
6978  * @return
6979  *   0 on success, a negative errno value otherwise and rte_errno is set.
6980  *
6981  * @note: wrapper for driver action_destroy op callback.
6982  */
6983 static int
6984 mlx5_shared_action_destroy(struct rte_eth_dev *dev,
6985                            struct rte_flow_shared_action *action,
6986                            struct rte_flow_error *error)
6987 {
6988         static const char err_msg[] = "shared action destruction unsupported";
6989         struct rte_flow_attr attr = { .transfer = 0 };
6990         const struct mlx5_flow_driver_ops *fops =
6991                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
6992
6993         if (!fops->action_destroy) {
6994                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
6995                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
6996                                    NULL, err_msg);
6997                 return -rte_errno;
6998         }
6999         return fops->action_destroy(dev, action, error);
7000 }
7001
7002 /* Wrapper for driver action_update op callback */
7003 static int
7004 flow_drv_action_update(struct rte_eth_dev *dev,
7005                        struct rte_flow_shared_action *action,
7006                        const void *action_conf,
7007                        const struct mlx5_flow_driver_ops *fops,
7008                        struct rte_flow_error *error)
7009 {
7010         static const char err_msg[] = "shared action update unsupported";
7011
7012         if (!fops->action_update) {
7013                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7014                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7015                                    NULL, err_msg);
7016                 return -rte_errno;
7017         }
7018         return fops->action_update(dev, action, action_conf, error);
7019 }
7020
7021 /* Wrapper for driver action_query op callback */
7022 static int
7023 flow_drv_action_query(struct rte_eth_dev *dev,
7024                       const struct rte_flow_shared_action *action,
7025                       void *data,
7026                       const struct mlx5_flow_driver_ops *fops,
7027                       struct rte_flow_error *error)
7028 {
7029         static const char err_msg[] = "shared action query unsupported";
7030
7031         if (!fops->action_query) {
7032                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7033                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7034                                    NULL, err_msg);
7035                 return -rte_errno;
7036         }
7037         return fops->action_query(dev, action, data, error);
7038 }
7039
7040 /**
7041  * Create shared action for reuse in multiple flow rules.
7042  *
7043  * @param dev
7044  *   Pointer to Ethernet device structure.
7045  * @param[in] action
7046  *   Action configuration for shared action creation.
7047  * @param[out] error
7048  *   Perform verbose error reporting if not NULL. PMDs initialize this
7049  *   structure in case of error only.
7050  * @return
7051  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
7052  */
7053 static struct rte_flow_shared_action *
7054 mlx5_shared_action_create(struct rte_eth_dev *dev,
7055                           const struct rte_flow_shared_action_conf *conf,
7056                           const struct rte_flow_action *action,
7057                           struct rte_flow_error *error)
7058 {
7059         static const char err_msg[] = "shared action creation unsupported";
7060         struct rte_flow_attr attr = { .transfer = 0 };
7061         const struct mlx5_flow_driver_ops *fops =
7062                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7063
7064         if (flow_drv_action_validate(dev, conf, action, fops, error))
7065                 return NULL;
7066         if (!fops->action_create) {
7067                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7068                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7069                                    NULL, err_msg);
7070                 return NULL;
7071         }
7072         return fops->action_create(dev, conf, action, error);
7073 }
7074
7075 /**
7076  * Updates in place the shared action configuration pointed to by the
7077  * *action* handle, with the configuration provided as the *action* argument.
7078  * The update of the shared action configuration affects all flow rules
7079  * reusing the action via its handle.
7080  *
7081  * @param dev
7082  *   Pointer to Ethernet device structure.
7083  * @param[in] shared_action
7084  *   Handle for the shared action to be updated.
7085  * @param[in] action
7086  *   Action specification used to modify the action pointed by handle.
7087  *   *action* must be of the same type as the action pointed to by the
7088  *   handle argument; otherwise it is considered invalid.
7089  * @param[out] error
7090  *   Perform verbose error reporting if not NULL. PMDs initialize this
7091  *   structure in case of error only.
7092  *
7093  * @return
7094  *   0 on success, a negative errno value otherwise and rte_errno is set.
7095  */
7096 static int
7097 mlx5_shared_action_update(struct rte_eth_dev *dev,
7098                 struct rte_flow_shared_action *shared_action,
7099                 const struct rte_flow_action *action,
7100                 struct rte_flow_error *error)
7101 {
7102         struct rte_flow_attr attr = { .transfer = 0 };
7103         const struct mlx5_flow_driver_ops *fops =
7104                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7105         int ret;
7106
7107         ret = flow_drv_action_validate(dev, NULL, action, fops, error);
7108         if (ret)
7109                 return ret;
7110         return flow_drv_action_update(dev, shared_action, action->conf, fops,
7111                                       error);
7112 }
7113
7114 /**
7115  * Query the shared action by handle.
7116  *
7117  * This function allows retrieving action-specific data such as counters.
7118  * Data is gathered by a special action which may be present in, or
7119  * referenced by, more than one flow rule definition.
7120  *
7121  * \see RTE_FLOW_ACTION_TYPE_COUNT
7122  *
7123  * @param dev
7124  *   Pointer to Ethernet device structure.
7125  * @param[in] action
7126  *   Handle for the shared action to query.
7127  * @param[in, out] data
7128  *   Pointer to storage for the associated query data type.
7129  * @param[out] error
7130  *   Perform verbose error reporting if not NULL. PMDs initialize this
7131  *   structure in case of error only.
7132  *
7133  * @return
7134  *   0 on success, a negative errno value otherwise and rte_errno is set.
7135  */
7136 static int
7137 mlx5_shared_action_query(struct rte_eth_dev *dev,
7138                          const struct rte_flow_shared_action *action,
7139                          void *data,
7140                          struct rte_flow_error *error)
7141 {
7142         struct rte_flow_attr attr = { .transfer = 0 };
7143         const struct mlx5_flow_driver_ops *fops =
7144                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7145
7146         return flow_drv_action_query(dev, action, data, fops, error);
7147 }
7148
7149 /**
7150  * Destroy all shared actions.
7151  *
7152  * @param dev
7153  *   Pointer to Ethernet device.
7154  *
7155  * @return
7156  *   0 on success, a negative errno value otherwise and rte_errno is set.
7157  */
7158 int
7159 mlx5_shared_action_flush(struct rte_eth_dev *dev)
7160 {
7161         struct rte_flow_error error;
7162         struct mlx5_priv *priv = dev->data->dev_private;
7163         struct mlx5_shared_action_rss *action;
7164         int ret = 0;
7165         uint32_t idx;
7166
7167         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RSS_SHARED_ACTIONS],
7168                       priv->rss_shared_actions, idx, action, next) {
7169                 ret |= mlx5_shared_action_destroy(dev,
7170                        (struct rte_flow_shared_action *)(uintptr_t)idx, &error);
7171         }
7172         return ret;
7173 }
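
/*
 * Application-side lifecycle sketch for the shared-action wrappers
 * above, using the public (experimental in this release) rte_flow
 * API; the RSS configuration and error handling are elided.
 */
#if 0	/* illustrative sketch only, not compiled */
	struct rte_flow_shared_action_conf conf = { .ingress = 1 };
	struct rte_flow_action_rss rss = { 0 /* application RSS conf */ };
	struct rte_flow_action act = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};
	struct rte_flow_shared_action *sa =
		rte_flow_shared_action_create(port_id, &conf, &act, &err);

	/* Reference `sa` from any number of rules through
	 * RTE_FLOW_ACTION_TYPE_SHARED, update it in place for all of
	 * them at once, then destroy it when the last rule is gone. */
	rte_flow_shared_action_destroy(port_id, sa, &err);
#endif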
7174
7175 #ifndef HAVE_MLX5DV_DR
7176 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
7177 #else
7178 #define MLX5_DOMAIN_SYNC_FLOW \
7179         (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
7180 #endif
7181
7182 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
7183 {
7184         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
7185         const struct mlx5_flow_driver_ops *fops;
7186         int ret;
7187         struct rte_flow_attr attr = { .transfer = 0 };
7188
7189         fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7190         ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
7191         if (ret > 0)
7192                 ret = -ret;
7193         return ret;
7194 }
7195
7196 /**
7197  * Tunnel offload functionality is defined for the DV environment only.
7198  */
7199 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
7200 __extension__
7201 union tunnel_offload_mark {
7202         uint32_t val;
7203         struct {
7204                 uint32_t app_reserve:8;
7205                 uint32_t table_id:15;
7206                 uint32_t transfer:1;
7207                 uint32_t _unused_:8;
7208         };
7209 };
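
/*
 * Encode/decode sketch for the mark layout above: the default miss
 * rule writes this value into the packet mark, and the RX path
 * recovers the flow table from it (see tunnel_mark_decode() below).
 */
#if 0	/* illustrative sketch only, not compiled */
	union tunnel_offload_mark m = { .val = 0 };

	m.table_id = tunnel_flow_tbl_to_id(flow_table);	/* 15 bits */
	m.transfer = !!attr->transfer;			/* 1 bit   */
	/* m.val round-trips via mbuf hash.fdir.hi on the miss path. */
#endif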
7210
7211 static bool
7212 mlx5_access_tunnel_offload_db
7213         (struct rte_eth_dev *dev,
7214          bool (*match)(struct rte_eth_dev *,
7215                        struct mlx5_flow_tunnel *, const void *),
7216          void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
7217          void (*miss)(struct rte_eth_dev *, void *),
7218          void *ctx, bool lock_op);
7219
7220 static int
7221 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
7222                              struct rte_flow *flow,
7223                              const struct rte_flow_attr *attr,
7224                              const struct rte_flow_action *app_actions,
7225                              uint32_t flow_idx,
7226                              struct tunnel_default_miss_ctx *ctx,
7227                              struct rte_flow_error *error)
7228 {
7229         struct mlx5_priv *priv = dev->data->dev_private;
7230         struct mlx5_flow *dev_flow;
7231         struct rte_flow_attr miss_attr = *attr;
7232         const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
7233         const struct rte_flow_item miss_items[2] = {
7234                 {
7235                         .type = RTE_FLOW_ITEM_TYPE_ETH,
7236                         .spec = NULL,
7237                         .last = NULL,
7238                         .mask = NULL
7239                 },
7240                 {
7241                         .type = RTE_FLOW_ITEM_TYPE_END,
7242                         .spec = NULL,
7243                         .last = NULL,
7244                         .mask = NULL
7245                 }
7246         };
7247         union tunnel_offload_mark mark_id;
7248         struct rte_flow_action_mark miss_mark;
7249         struct rte_flow_action miss_actions[3] = {
7250                 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
7251                 [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
7252         };
7253         const struct rte_flow_action_jump *jump_data;
7254         uint32_t i, flow_table = 0; /* prevent compilation warning */
7255         struct flow_grp_info grp_info = {
7256                 .external = 1,
7257                 .transfer = attr->transfer,
7258                 .fdb_def_rule = !!priv->fdb_def_rule,
7259                 .std_tbl_fix = 0,
7260         };
7261         int ret;
7262
7263         if (!attr->transfer) {
7264                 uint32_t q_size;
7265
7266                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
7267                 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
7268                 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
7269                                          0, SOCKET_ID_ANY);
7270                 if (!ctx->queue)
7271                         return rte_flow_error_set
7272                                 (error, ENOMEM,
7273                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7274                                 NULL, "invalid default miss RSS");
7275                 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
7276                 ctx->action_rss.level = 0;
7277                 ctx->action_rss.types = priv->rss_conf.rss_hf;
7278                 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
7279                 ctx->action_rss.queue_num = priv->reta_idx_n;
7280                 ctx->action_rss.key = priv->rss_conf.rss_key;
7281                 ctx->action_rss.queue = ctx->queue;
7282                 if (!priv->reta_idx_n || !priv->rxqs_n)
7283                         return rte_flow_error_set
7284                                 (error, EINVAL,
7285                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7286                                 NULL, "invalid port configuration");
7287                 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
7288                         ctx->action_rss.types = 0;
7289                 for (i = 0; i != priv->reta_idx_n; ++i)
7290                         ctx->queue[i] = (*priv->reta_idx)[i];
7291         } else {
7292                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
7293                 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
7294         }
7295         miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
7296         for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
7297         jump_data = app_actions->conf;
7298         miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
7299         miss_attr.group = jump_data->group;
7300         ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
7301                                        &flow_table, &grp_info, error);
7302         if (ret)
7303                 return rte_flow_error_set(error, EINVAL,
7304                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
7305                                           NULL, "invalid tunnel id");
7306         mark_id.app_reserve = 0;
7307         mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
7308         mark_id.transfer = !!attr->transfer;
7309         mark_id._unused_ = 0;
7310         miss_mark.id = mark_id.val;
7311         dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
7312                                     miss_items, miss_actions, flow_idx, error);
7313         if (!dev_flow)
7314                 return -rte_errno;
7315         dev_flow->flow = flow;
7316         dev_flow->external = true;
7317         dev_flow->tunnel = tunnel;
7318         /* The subflow object was created; include it in the list. */
7319         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
7320                       dev_flow->handle, next);
7321         DRV_LOG(DEBUG,
7322                 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
7323                 dev->data->port_id, tunnel->app_tunnel.type,
7324                 tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
7325         ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
7326                                   miss_actions, error);
7327         if (!ret)
7328                 ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
7329                                                   error);
7330
7331         return ret;
7332 }
7333
7334 static const struct mlx5_flow_tbl_data_entry  *
7335 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
7336 {
7337         struct mlx5_priv *priv = dev->data->dev_private;
7338         struct mlx5_dev_ctx_shared *sh = priv->sh;
7339         struct mlx5_hlist_entry *he;
7340         union tunnel_offload_mark mbits = { .val = mark };
7341         union mlx5_flow_tbl_key table_key = {
7342                 {
7343                         .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
7344                         .dummy = 0,
7345                         .domain = !!mbits.transfer,
7346                         .direction = 0,
7347                 }
7348         };
7349         he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
7350         return he ?
7351                container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
7352 }
7353
7354 static void
7355 mlx5_flow_tunnel_grp2tbl_remove_cb(struct mlx5_hlist *list,
7356                                    struct mlx5_hlist_entry *entry)
7357 {
7358         struct mlx5_dev_ctx_shared *sh = list->ctx;
7359         struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
7360
7361         mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7362                         tunnel_flow_tbl_to_id(tte->flow_table));
7363         mlx5_free(tte);
7364 }
7365
7366 static int
7367 mlx5_flow_tunnel_grp2tbl_match_cb(struct mlx5_hlist *list __rte_unused,
7368                                   struct mlx5_hlist_entry *entry,
7369                                   uint64_t key, void *cb_ctx __rte_unused)
7370 {
7371         union tunnel_tbl_key tbl = {
7372                 .val = key,
7373         };
7374         struct tunnel_tbl_entry *tte = container_of(entry, typeof(*tte), hash);
7375
7376         return tbl.tunnel_id != tte->tunnel_id || tbl.group != tte->group;
7377 }
7378
7379 static struct mlx5_hlist_entry *
7380 mlx5_flow_tunnel_grp2tbl_create_cb(struct mlx5_hlist *list, uint64_t key,
7381                                    void *ctx __rte_unused)
7382 {
7383         struct mlx5_dev_ctx_shared *sh = list->ctx;
7384         struct tunnel_tbl_entry *tte;
7385         union tunnel_tbl_key tbl = {
7386                 .val = key,
7387         };
7388
7389         tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
7390                           sizeof(*tte), 0,
7391                           SOCKET_ID_ANY);
7392         if (!tte)
7393                 goto err;
7394         mlx5_ipool_malloc(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7395                           &tte->flow_table);
7396         if (tte->flow_table >= MLX5_MAX_TABLES) {
7397                 DRV_LOG(ERR, "Tunnel TBL ID %d exceed max limit.",
7398                         tte->flow_table);
7399                 mlx5_ipool_free(sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7400                                 tte->flow_table);
7401                 goto err;
7402         } else if (!tte->flow_table) {
7403                 goto err;
7404         }
7405         tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
7406         tte->tunnel_id = tbl.tunnel_id;
7407         tte->group = tbl.group;
7408         return &tte->hash;
7409 err:
7410         if (tte)
7411                 mlx5_free(tte);
7412         return NULL;
7413 }
7414
7415 static uint32_t
7416 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
7417                                 const struct mlx5_flow_tunnel *tunnel,
7418                                 uint32_t group, uint32_t *table,
7419                                 struct rte_flow_error *error)
7420 {
7421         struct mlx5_hlist_entry *he;
7422         struct tunnel_tbl_entry *tte;
7423         union tunnel_tbl_key key = {
7424                 .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
7425                 .group = group
7426         };
7427         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7428         struct mlx5_hlist *group_hash;
7429
7430         group_hash = tunnel ? tunnel->groups : thub->groups;
7431         he = mlx5_hlist_register(group_hash, key.val, NULL);
7432         if (!he)
7433                 return rte_flow_error_set(error, EINVAL,
7434                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7435                                           NULL,
7436                                           "tunnel group index not supported");
7437         tte = container_of(he, typeof(*tte), hash);
7438         *table = tte->flow_table;
7439         DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
7440                 dev->data->port_id, key.tunnel_id, group, *table);
7441         return 0;
7442 }
7443
7444 static void
7445 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
7446                       struct mlx5_flow_tunnel *tunnel)
7447 {
7448         struct mlx5_priv *priv = dev->data->dev_private;
7449         struct mlx5_indexed_pool *ipool;
7450
7451         DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
7452                 dev->data->port_id, tunnel->tunnel_id);
7453         LIST_REMOVE(tunnel, chain);
7454         mlx5_hlist_destroy(tunnel->groups);
7455         ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
7456         mlx5_ipool_free(ipool, tunnel->tunnel_id);
7457 }
7458
7459 static bool
7460 mlx5_access_tunnel_offload_db
7461         (struct rte_eth_dev *dev,
7462          bool (*match)(struct rte_eth_dev *,
7463                        struct mlx5_flow_tunnel *, const void *),
7464          void (*hit)(struct rte_eth_dev *, struct mlx5_flow_tunnel *, void *),
7465          void (*miss)(struct rte_eth_dev *, void *),
7466          void *ctx, bool lock_op)
7467 {
7468         bool verdict = false;
7469         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7470         struct mlx5_flow_tunnel *tunnel;
7471
7472         rte_spinlock_lock(&thub->sl);
7473         LIST_FOREACH(tunnel, &thub->tunnels, chain) {
7474                 verdict = match(dev, tunnel, (const void *)ctx);
7475                 if (verdict)
7476                         break;
7477         }
7478         if (!lock_op)
7479                 rte_spinlock_unlock(&thub->sl);
7480         if (verdict && hit)
7481                 hit(dev, tunnel, ctx);
7482         if (!verdict && miss)
7483                 miss(dev, ctx);
7484         if (lock_op)
7485                 rte_spinlock_unlock(&thub->sl);
7486
7487         return verdict;
7488 }
7489
7490 struct tunnel_db_find_tunnel_id_ctx {
7491         uint32_t tunnel_id;
7492         struct mlx5_flow_tunnel *tunnel;
7493 };
7494
7495 static bool
7496 find_tunnel_id_match(struct rte_eth_dev *dev,
7497                      struct mlx5_flow_tunnel *tunnel, const void *x)
7498 {
7499         const struct tunnel_db_find_tunnel_id_ctx *ctx = x;
7500
7501         RTE_SET_USED(dev);
7502         return tunnel->tunnel_id == ctx->tunnel_id;
7503 }
7504
7505 static void
7506 find_tunnel_id_hit(struct rte_eth_dev *dev,
7507                    struct mlx5_flow_tunnel *tunnel, void *x)
7508 {
7509         struct tunnel_db_find_tunnel_id_ctx *ctx = x;
7510         RTE_SET_USED(dev);
7511         ctx->tunnel = tunnel;
7512 }
7513
7514 static struct mlx5_flow_tunnel *
7515 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
7516 {
7517         struct tunnel_db_find_tunnel_id_ctx ctx = {
7518                 .tunnel_id = id,
7519         };
7520
7521         mlx5_access_tunnel_offload_db(dev, find_tunnel_id_match,
7522                                       find_tunnel_id_hit, NULL, &ctx, true);
7523
7524         return ctx.tunnel;
7525 }
7526
7527 static struct mlx5_flow_tunnel *
7528 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
7529                           const struct rte_flow_tunnel *app_tunnel)
7530 {
7531         struct mlx5_priv *priv = dev->data->dev_private;
7532         struct mlx5_indexed_pool *ipool;
7533         struct mlx5_flow_tunnel *tunnel;
7534         uint32_t id;
7535
7536         ipool = priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID];
7537         tunnel = mlx5_ipool_zmalloc(ipool, &id);
7538         if (!tunnel)
7539                 return NULL;
7540         if (id >= MLX5_MAX_TUNNELS) {
7541                 mlx5_ipool_free(ipool, id);
7542                 DRV_LOG(ERR, "Tunnel ID %d exceed max limit.", id);
7543                 return NULL;
7544         }
7545         tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
7546                                            mlx5_flow_tunnel_grp2tbl_create_cb,
7547                                            mlx5_flow_tunnel_grp2tbl_match_cb,
7548                                            mlx5_flow_tunnel_grp2tbl_remove_cb);
7549         if (!tunnel->groups) {
7550                 mlx5_ipool_free(ipool, id);
7551                 return NULL;
7552         }
7553         tunnel->groups->ctx = priv->sh;
7554         /* Initialize the new PMD tunnel. */
7555         memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
7556         tunnel->tunnel_id = id;
7557         tunnel->action.type = (typeof(tunnel->action.type))
7558                               MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
7559         tunnel->action.conf = tunnel;
7560         tunnel->item.type = (typeof(tunnel->item.type))
7561                             MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
7562         tunnel->item.spec = tunnel;
7563         tunnel->item.last = NULL;
7564         tunnel->item.mask = NULL;
7565
7566         DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
7567                 dev->data->port_id, tunnel->tunnel_id);
7568
7569         return tunnel;
7570 }
7571
7572 struct tunnel_db_get_tunnel_ctx {
7573         const struct rte_flow_tunnel *app_tunnel;
7574         struct mlx5_flow_tunnel *tunnel;
7575 };
7576
7577 static bool get_tunnel_match(struct rte_eth_dev *dev,
7578                              struct mlx5_flow_tunnel *tunnel, const void *x)
7579 {
7580         const struct tunnel_db_get_tunnel_ctx *ctx = x;
7581
7582         RTE_SET_USED(dev);
7583         return !memcmp(ctx->app_tunnel, &tunnel->app_tunnel,
7584                        sizeof(*ctx->app_tunnel));
7585 }
7586
7587 static void get_tunnel_hit(struct rte_eth_dev *dev,
7588                            struct mlx5_flow_tunnel *tunnel, void *x)
7589 {
7590         /* called under tunnel spinlock protection */
7591         struct tunnel_db_get_tunnel_ctx *ctx = x;
7592
7593         RTE_SET_USED(dev);
7594         tunnel->refctn++;
7595         ctx->tunnel = tunnel;
7596 }
7597
7598 static void get_tunnel_miss(struct rte_eth_dev *dev, void *x)
7599 {
7600         /* called under tunnel spinlock protection */
7601         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7602         struct tunnel_db_get_tunnel_ctx *ctx = x;
7603
7604         rte_spinlock_unlock(&thub->sl);
7605         ctx->tunnel = mlx5_flow_tunnel_allocate(dev, ctx->app_tunnel);
7606         rte_spinlock_lock(&thub->sl);
7607         /* Initialize the refcount only if the allocation succeeded. */
7608         if (ctx->tunnel) {
7609                 ctx->tunnel->refctn = 1;
7610                 LIST_INSERT_HEAD(&thub->tunnels, ctx->tunnel, chain);
7611         }
7610 }
7611
7612
7613 static int
7614 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
7615                      const struct rte_flow_tunnel *app_tunnel,
7616                      struct mlx5_flow_tunnel **tunnel)
7617 {
7618         struct tunnel_db_get_tunnel_ctx ctx = {
7619                 .app_tunnel = app_tunnel,
7620         };
7621
7622         mlx5_access_tunnel_offload_db(dev, get_tunnel_match, get_tunnel_hit,
7623                                       get_tunnel_miss, &ctx, true);
7624         *tunnel = ctx.tunnel;
7625         return ctx.tunnel ? 0 : -ENOMEM;
7626 }
7627
7628 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
7629 {
7630         struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
7631
7632         if (!thub)
7633                 return;
7634         if (!LIST_EMPTY(&thub->tunnels))
7635                 DRV_LOG(WARNING, "port %u tunnels present", port_id);
7636         mlx5_hlist_destroy(thub->groups);
7637         mlx5_free(thub);
7638 }
7639
7640 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
7641 {
7642         int err;
7643         struct mlx5_flow_tunnel_hub *thub;
7644
7645         thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
7646                            0, SOCKET_ID_ANY);
7647         if (!thub)
7648                 return -ENOMEM;
7649         LIST_INIT(&thub->tunnels);
7650         rte_spinlock_init(&thub->sl);
7651         thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
7652                                          0, mlx5_flow_tunnel_grp2tbl_create_cb,
7653                                          mlx5_flow_tunnel_grp2tbl_match_cb,
7654                                          mlx5_flow_tunnel_grp2tbl_remove_cb);
7655         if (!thub->groups) {
7656                 err = -rte_errno;
7657                 goto err;
7658         }
7659         thub->groups->ctx = sh;
7660         sh->tunnel_hub = thub;
7661
7662         return 0;
7663
7664 err:
7665         if (thub->groups)
7666                 mlx5_hlist_destroy(thub->groups);
7667         mlx5_free(thub);
7669         return err;
7670 }
7671
7672 static inline bool
7673 mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
7674                           struct rte_flow_tunnel *tunnel,
7675                           const char **err_msg)
7676 {
7677         /* Take a pointer to the message so the caller can report it. */
7678         *err_msg = NULL;
7679         if (!is_tunnel_offload_active(dev)) {
7680                 *err_msg = "tunnel offload was not activated";
7681                 goto out;
7682         } else if (!tunnel) {
7683                 *err_msg = "no application tunnel";
7684                 goto out;
7685         }
7686
7687         switch (tunnel->type) {
7688         default:
7689                 *err_msg = "unsupported tunnel type";
7690                 goto out;
7691         case RTE_FLOW_ITEM_TYPE_VXLAN:
7692                 break;
7693         }
7694
7695 out:
7696         return !*err_msg;
7697 }
7697
7698 static int
7699 mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
7700                     struct rte_flow_tunnel *app_tunnel,
7701                     struct rte_flow_action **actions,
7702                     uint32_t *num_of_actions,
7703                     struct rte_flow_error *error)
7704 {
7705         int ret;
7706         struct mlx5_flow_tunnel *tunnel;
7707         const char *err_msg = NULL;
7708         bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
7709
7710         if (!verdict)
7711                 return rte_flow_error_set(error, EINVAL,
7712                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
7713                                           err_msg);
7714         ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
7715         if (ret < 0) {
7716                 return rte_flow_error_set(error, ret,
7717                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
7718                                           "failed to initialize pmd tunnel");
7719         }
7720         *actions = &tunnel->action;
7721         *num_of_actions = 1;
7722         return 0;
7723 }
7724
7725 static int
7726 mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
7727                        struct rte_flow_tunnel *app_tunnel,
7728                        struct rte_flow_item **items,
7729                        uint32_t *num_of_items,
7730                        struct rte_flow_error *error)
7731 {
7732         int ret;
7733         struct mlx5_flow_tunnel *tunnel;
7734         const char *err_msg = NULL;
7735         bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);
7736
7737         if (!verdict)
7738                 return rte_flow_error_set(error, EINVAL,
7739                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
7740                                           err_msg);
7741         ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
7742         if (ret < 0) {
7743                 return rte_flow_error_set(error, ret,
7744                                           RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
7745                                           "failed to initialize pmd tunnel");
7746         }
7747         *items = &tunnel->item;
7748         *num_of_items = 1;
7749         return 0;
7750 }
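
/*
 * Application-side sketch of the tunnel offload handshake served by
 * the two helpers above, using the public (experimental) rte_flow
 * API; error handling is elided.
 */
#if 0	/* illustrative sketch only, not compiled */
	struct rte_flow_tunnel tnl = { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
	struct rte_flow_action *pmd_actions;
	struct rte_flow_item *pmd_items;
	uint32_t n_actions, n_items;

	/* The PMD returns one action to prepend to the decap rule... */
	rte_flow_tunnel_decap_set(port_id, &tnl, &pmd_actions,
				  &n_actions, &err);
	/* ...and one item to prepend to the rules matching the miss. */
	rte_flow_tunnel_match(port_id, &tnl, &pmd_items, &n_items, &err);
	/* Release both with rte_flow_tunnel_action_decap_release() and
	 * rte_flow_tunnel_item_release() once the rules are gone. */
#endif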
7751
7752 struct tunnel_db_element_release_ctx {
7753         struct rte_flow_item *items;
7754         struct rte_flow_action *actions;
7755         uint32_t num_elements;
7756         struct rte_flow_error *error;
7757         int ret;
7758 };
7759
7760 static bool
7761 tunnel_element_release_match(struct rte_eth_dev *dev,
7762                              struct mlx5_flow_tunnel *tunnel, const void *x)
7763 {
7764         const struct tunnel_db_element_release_ctx *ctx = x;
7765
7766         RTE_SET_USED(dev);
7767         if (ctx->num_elements != 1)
7768                 return false;
7769         else if (ctx->items)
7770                 return ctx->items == &tunnel->item;
7771         else if (ctx->actions)
7772                 return ctx->actions == &tunnel->action;
7773
7774         return false;
7775 }
7776
7777 static void
7778 tunnel_element_release_hit(struct rte_eth_dev *dev,
7779                            struct mlx5_flow_tunnel *tunnel, void *x)
7780 {
7781         struct tunnel_db_element_release_ctx *ctx = x;
7782         ctx->ret = 0;
7783         if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
7784                 mlx5_flow_tunnel_free(dev, tunnel);
7785 }
7786
7787 static void
7788 tunnel_element_release_miss(struct rte_eth_dev *dev, void *x)
7789 {
7790         struct tunnel_db_element_release_ctx *ctx = x;
7791         RTE_SET_USED(dev);
7792         ctx->ret = rte_flow_error_set(ctx->error, EINVAL,
7793                                       RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
7794                                       "invalid argument");
7795 }
7796
static int
mlx5_flow_tunnel_item_release(struct rte_eth_dev *dev,
			      struct rte_flow_item *pmd_items,
			      uint32_t num_items, struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = pmd_items,
		.actions = NULL,
		.num_elements = num_items,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}

static int
mlx5_flow_tunnel_action_release(struct rte_eth_dev *dev,
				struct rte_flow_action *pmd_actions,
				uint32_t num_actions,
				struct rte_flow_error *err)
{
	struct tunnel_db_element_release_ctx ctx = {
		.items = NULL,
		.actions = pmd_actions,
		.num_elements = num_actions,
		.error = err,
	};

	mlx5_access_tunnel_offload_db(dev, tunnel_element_release_match,
				      tunnel_element_release_hit,
				      tunnel_element_release_miss, &ctx, false);

	return ctx.ret;
}
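
/*
 * Matching release sketch, illustrative and not part of the driver: once
 * the application has destroyed every rule using the PMD objects, it
 * returns them through the generic API, which lands in the two handlers
 * above.
 *
 * @code
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_tunnel_item_release(port_id, pmd_items,
 *					 num_of_items, &error))
 *		printf("item release: %s\n", error.message);
 *	if (rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
 *						 num_of_actions, &error))
 *		printf("action release: %s\n", error.message);
 * @endcode
 */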
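/**
 * PMD handler for rte_flow_get_restore_info().
 *
 * A packet that missed in the tunnel offload group set carries the tunnel
 * mark in its mbuf FDIR metadata; decode it back into the application
 * tunnel descriptor and the group the packet missed on.
 */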
static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
				  struct rte_mbuf *m,
				  struct rte_flow_restore_info *info,
				  struct rte_flow_error *err)
{
	uint64_t ol_flags = m->ol_flags;
	const struct mlx5_flow_tbl_data_entry *tble;
	const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

	if (!is_tunnel_offload_active(dev)) {
		info->flags = 0;
		return 0;
	}

	if ((ol_flags & mask) != mask)
		goto err;
	tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
	if (!tble) {
		DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
			dev->data->port_id, m->hash.fdir.hi);
		goto err;
	}
	MLX5_ASSERT(tble->tunnel);
	memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
	info->group_id = tble->group_id;
	info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
		      RTE_FLOW_RESTORE_INFO_GROUP_ID |
		      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

	return 0;

err:
	return rte_flow_error_set(err, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "failed to get restore info");
}
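
/*
 * Datapath sketch, illustrative only (port_id, mbuf and burst handling are
 * assumptions): restoring tunnel info on a received packet that missed the
 * tunnel rule set.
 *
 * @code
 *	struct rte_flow_restore_info info;
 *	struct rte_flow_error error;
 *
 *	if (rte_flow_get_restore_info(port_id, mbuf, &info, &error) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL)) {
 *		// the packet belongs to tunnel info.tunnel; it is still
 *		// encapsulated if RTE_FLOW_RESTORE_INFO_ENCAPSULATED is set
 *	}
 * @endcode
 */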

#else /* HAVE_IBV_FLOW_DV_SUPPORT */
static int
mlx5_flow_tunnel_decap_set(__rte_unused struct rte_eth_dev *dev,
			   __rte_unused struct rte_flow_tunnel *app_tunnel,
			   __rte_unused struct rte_flow_action **actions,
			   __rte_unused uint32_t *num_of_actions,
			   __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_match(__rte_unused struct rte_eth_dev *dev,
		       __rte_unused struct rte_flow_tunnel *app_tunnel,
		       __rte_unused struct rte_flow_item **items,
		       __rte_unused uint32_t *num_of_items,
		       __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_item_release(__rte_unused struct rte_eth_dev *dev,
			      __rte_unused struct rte_flow_item *pmd_items,
			      __rte_unused uint32_t num_items,
			      __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_action_release(__rte_unused struct rte_eth_dev *dev,
				__rte_unused struct rte_flow_action *pmd_action,
				__rte_unused uint32_t num_actions,
				__rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
mlx5_flow_tunnel_get_restore_info(__rte_unused struct rte_eth_dev *dev,
				  __rte_unused struct rte_mbuf *m,
				  __rte_unused struct rte_flow_restore_info *i,
				  __rte_unused struct rte_flow_error *err)
{
	return -ENOTSUP;
}

static int
flow_tunnel_add_default_miss(__rte_unused struct rte_eth_dev *dev,
			     __rte_unused struct rte_flow *flow,
			     __rte_unused const struct rte_flow_attr *attr,
			     __rte_unused const struct rte_flow_action *actions,
			     __rte_unused uint32_t flow_idx,
			     __rte_unused struct tunnel_default_miss_ctx *ctx,
			     __rte_unused struct rte_flow_error *error)
{
	return -ENOTSUP;
}

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(__rte_unused struct rte_eth_dev *dev,
		    __rte_unused uint32_t id)
{
	return NULL;
}

static void
mlx5_flow_tunnel_free(__rte_unused struct rte_eth_dev *dev,
		      __rte_unused struct mlx5_flow_tunnel *tunnel)
{
}

static uint32_t
tunnel_flow_group_to_flow_table(__rte_unused struct rte_eth_dev *dev,
				__rte_unused const struct mlx5_flow_tunnel *t,
				__rte_unused uint32_t group,
				__rte_unused uint32_t *table,
				struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  "tunnel offload requires DV support");
}

void
mlx5_release_tunnel_hub(__rte_unused struct mlx5_dev_ctx_shared *sh,
			__rte_unused uint16_t port_id)
{
}
#endif /* HAVE_IBV_FLOW_DV_SUPPORT */