net/mlx5: remove unused mreg copy
drivers/net/mlx5/mlx5_flow.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_eal_paging.h>
#include <rte_flow.h>
#include <rte_cycles.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>
#include <mlx5_malloc.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_flow_os.h"
#include "mlx5_rxtx.h"
#include "mlx5_common_os.h"
#include "rte_pmd_mlx5.h"

static struct mlx5_flow_tunnel *
mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id);
static void
mlx5_flow_tunnel_free(struct rte_eth_dev *dev, struct mlx5_flow_tunnel *tunnel);
static const struct mlx5_flow_tbl_data_entry *
tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark);
static int
mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
                     const struct rte_flow_tunnel *app_tunnel,
                     struct mlx5_flow_tunnel **tunnel);


/** Device flow drivers. */
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

/** Helper macro to build input graph for mlx5_flow_expand_rss(). */
#define MLX5_FLOW_EXPAND_RSS_NEXT(...) \
        (const int []){ \
                __VA_ARGS__, 0, \
        }

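/*
 * Usage sketch (illustrative): a node with two possible successors is
 * declared as
 *
 *     .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
 *                                       MLX5_EXPANSION_IPV6),
 *
 * which the macro above turns into the compound literal
 *
 *     (const int []){ MLX5_EXPANSION_IPV4, MLX5_EXPANSION_IPV6, 0, }
 *
 * with the trailing 0 acting as the list terminator.
 */
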
/** Node object of input graph for mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_node {
        const int *const next;
        /**<
         * List of next node indexes. A zero value terminates the list.
         */
        const enum rte_flow_item_type type;
        /**< Pattern item type of current node. */
        uint64_t rss_types;
        /**<
         * RSS types bit-field associated with this node
         * (see ETH_RSS_* definitions).
         */
};

/** Object returned by mlx5_flow_expand_rss(). */
struct mlx5_flow_expand_rss {
        uint32_t entries;
        /**< Number of valid entries in @p entry[]. */
        struct {
                struct rte_flow_item *pattern; /**< Expanded pattern array. */
                uint32_t priority; /**< Priority offset for each expansion. */
        } entry[];
};

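/*
 * Memory layout sketch (illustrative): mlx5_flow_expand_rss() places the
 * fixed-size entry[] array first and packs the expanded rte_flow_item
 * arrays right behind it, each entry's pattern pointer referencing its
 * own slice:
 *
 *     +----------+-----+------------+------------+-----+
 *     | entry[0] | ... | pattern #0 | pattern #1 | ... |
 *     +----------+-----+------------+------------+-----+
 */
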
static enum rte_flow_item_type
mlx5_flow_expand_rss_item_complete(const struct rte_flow_item *item)
{
        enum rte_flow_item_type ret = RTE_FLOW_ITEM_TYPE_VOID;
        uint16_t ether_type = 0;
        uint16_t ether_type_m;
        uint8_t ip_next_proto = 0;
        uint8_t ip_next_proto_m;

        if (item == NULL || item->spec == NULL)
                return ret;
        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_eth *)
                                                (item->mask))->type;
                else
                        ether_type_m = rte_flow_item_eth_mask.type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_eth *)
                                (item->spec))->type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                if (item->mask)
                        ether_type_m = ((const struct rte_flow_item_vlan *)
                                                (item->mask))->inner_type;
                else
                        ether_type_m = rte_flow_item_vlan_mask.inner_type;
                if (ether_type_m != RTE_BE16(0xFFFF))
                        break;
                ether_type = ((const struct rte_flow_item_vlan *)
                                (item->spec))->inner_type;
                if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV4)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else if (rte_be_to_cpu_16(ether_type) == RTE_ETHER_TYPE_VLAN)
                        ret = RTE_FLOW_ITEM_TYPE_VLAN;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv4 *)
                                        (item->mask))->hdr.next_proto_id;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv4_mask.hdr.next_proto_id;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv4 *)
                                (item->spec))->hdr.next_proto_id;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                if (item->mask)
                        ip_next_proto_m = ((const struct rte_flow_item_ipv6 *)
                                                (item->mask))->hdr.proto;
                else
                        ip_next_proto_m =
                                rte_flow_item_ipv6_mask.hdr.proto;
                if (ip_next_proto_m != 0xFF)
                        break;
                ip_next_proto = ((const struct rte_flow_item_ipv6 *)
                                (item->spec))->hdr.proto;
                if (ip_next_proto == IPPROTO_UDP)
                        ret = RTE_FLOW_ITEM_TYPE_UDP;
                else if (ip_next_proto == IPPROTO_TCP)
                        ret = RTE_FLOW_ITEM_TYPE_TCP;
                else if (ip_next_proto == IPPROTO_IP)
                        ret = RTE_FLOW_ITEM_TYPE_IPV4;
                else if (ip_next_proto == IPPROTO_IPV6)
                        ret = RTE_FLOW_ITEM_TYPE_IPV6;
                else
                        ret = RTE_FLOW_ITEM_TYPE_END;
                break;
        default:
                ret = RTE_FLOW_ITEM_TYPE_VOID;
                break;
        }
        return ret;
}

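/*
 * Example (illustrative): given the item below, with the EtherType fully
 * masked, mlx5_flow_expand_rss_item_complete() returns
 * RTE_FLOW_ITEM_TYPE_IPV4, i.e. the header expected to follow:
 *
 *     struct rte_flow_item_eth eth_spec = {
 *             .type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *     };
 *     struct rte_flow_item_eth eth_mask = {
 *             .type = RTE_BE16(0xFFFF),
 *     };
 *     struct rte_flow_item item = {
 *             .type = RTE_FLOW_ITEM_TYPE_ETH,
 *             .spec = &eth_spec,
 *             .mask = &eth_mask,
 *     };
 */
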
/**
 * Expand RSS flows into several possible flows according to the RSS hash
 * fields requested and the driver capabilities.
 *
 * @param[out] buf
 *   Buffer to store the result expansion.
 * @param[in] size
 *   Buffer size in bytes. If 0, @p buf can be NULL.
 * @param[in] pattern
 *   User flow pattern.
 * @param[in] types
 *   RSS types to expand (see ETH_RSS_* definitions).
 * @param[in] graph
 *   Input graph to expand @p pattern according to @p types.
 * @param[in] graph_root_index
 *   Index of root node in @p graph, typically 0.
 *
 * @return
 *   A positive value representing the size of @p buf in bytes regardless of
 *   @p size on success; a negative errno value otherwise, with rte_errno
 *   set. The following errors are defined:
 *
 *   -E2BIG: graph @p graph is too deep.
 */
static int
mlx5_flow_expand_rss(struct mlx5_flow_expand_rss *buf, size_t size,
                     const struct rte_flow_item *pattern, uint64_t types,
                     const struct mlx5_flow_expand_node graph[],
                     int graph_root_index)
{
        const int elt_n = 8;
        const struct rte_flow_item *item;
        const struct mlx5_flow_expand_node *node = &graph[graph_root_index];
        const int *next_node;
        const int *stack[elt_n];
        int stack_pos = 0;
        struct rte_flow_item flow_items[elt_n];
        unsigned int i;
        size_t lsize;
        size_t user_pattern_size = 0;
        void *addr = NULL;
        const struct mlx5_flow_expand_node *next = NULL;
        struct rte_flow_item missed_item;
        int missed = 0;
        int elt = 0;
        const struct rte_flow_item *last_item = NULL;

        memset(&missed_item, 0, sizeof(missed_item));
        lsize = offsetof(struct mlx5_flow_expand_rss, entry) +
                elt_n * sizeof(buf->entry[0]);
        if (lsize <= size) {
                buf->entry[0].priority = 0;
                buf->entry[0].pattern = (void *)&buf->entry[elt_n];
                buf->entries = 0;
                addr = buf->entry[0].pattern;
        }
        for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type != RTE_FLOW_ITEM_TYPE_VOID)
                        last_item = item;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == item->type)
                                break;
                }
                if (next)
                        node = next;
                user_pattern_size += sizeof(*item);
        }
        user_pattern_size += sizeof(*item); /* Handle END item. */
        lsize += user_pattern_size;
        /* Copy the user pattern in the first entry of the buffer. */
        if (lsize <= size) {
                rte_memcpy(addr, pattern, user_pattern_size);
                addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                buf->entries = 1;
        }
        /* Start expanding. */
        memset(flow_items, 0, sizeof(flow_items));
        user_pattern_size -= sizeof(*item);
        /*
         * Check if the last valid item has a spec set; if so, the pattern
         * needs to be completed before it can be used for expansion.
         */
        missed_item.type = mlx5_flow_expand_rss_item_complete(last_item);
        if (missed_item.type == RTE_FLOW_ITEM_TYPE_END) {
                /* Item type END indicates expansion is not required. */
                return lsize;
        }
        if (missed_item.type != RTE_FLOW_ITEM_TYPE_VOID) {
                next = NULL;
                missed = 1;
                for (i = 0; node->next && node->next[i]; ++i) {
                        next = &graph[node->next[i]];
                        if (next->type == missed_item.type) {
                                flow_items[0].type = missed_item.type;
                                flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                                break;
                        }
                        next = NULL;
                }
        }
        if (next && missed) {
                elt = 2; /* missed item + item end. */
                node = next;
                lsize += elt * sizeof(*item) + user_pattern_size;
                if ((node->rss_types & types) && lsize <= size) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
                        rte_memcpy(addr, buf->entry[0].pattern,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
        }
        memset(flow_items, 0, sizeof(flow_items));
        next_node = node->next;
        stack[stack_pos] = next_node;
        node = next_node ? &graph[*next_node] : NULL;
        while (node) {
                flow_items[stack_pos].type = node->type;
                if (node->rss_types & types) {
                        /*
                         * Compute the number of items to copy from the
                         * expansion and copy them.
                         * When stack_pos is 0, there is one element in it,
                         * plus the additional END item.
                         */
                        elt = stack_pos + 2;
                        flow_items[stack_pos + 1].type = RTE_FLOW_ITEM_TYPE_END;
                        lsize += elt * sizeof(*item) + user_pattern_size;
                        if (lsize <= size) {
                                size_t n = elt * sizeof(*item);

                                buf->entry[buf->entries].priority =
                                        stack_pos + 1 + missed;
                                buf->entry[buf->entries].pattern = addr;
                                buf->entries++;
                                rte_memcpy(addr, buf->entry[0].pattern,
                                           user_pattern_size);
                                addr = (void *)(((uintptr_t)addr) +
                                                user_pattern_size);
                                rte_memcpy(addr, &missed_item,
                                           missed * sizeof(*item));
                                addr = (void *)(((uintptr_t)addr) +
                                        missed * sizeof(*item));
                                rte_memcpy(addr, flow_items, n);
                                addr = (void *)(((uintptr_t)addr) + n);
                        }
                }
                /* Go deeper. */
                if (node->next) {
                        next_node = node->next;
                        if (stack_pos++ == elt_n - 1) {
                                rte_errno = E2BIG;
                                return -rte_errno;
                        }
                        stack[stack_pos] = next_node;
                } else if (*(next_node + 1)) {
                        /* Follow up with the next possibility. */
                        ++next_node;
                } else {
                        /* Move to the next path. */
                        if (stack_pos)
                                next_node = stack[--stack_pos];
                        next_node++;
                        stack[stack_pos] = next_node;
                }
                node = *next_node ? &graph[*next_node] : NULL;
        }
        /* No expanded flows, but we have a missed item; create one rule for it. */
        if (buf->entries == 1 && missed != 0) {
                elt = 2;
                lsize += elt * sizeof(*item) + user_pattern_size;
                if (lsize <= size) {
                        buf->entry[buf->entries].priority = 1;
                        buf->entry[buf->entries].pattern = addr;
                        buf->entries++;
                        flow_items[0].type = missed_item.type;
                        flow_items[1].type = RTE_FLOW_ITEM_TYPE_END;
                        rte_memcpy(addr, buf->entry[0].pattern,
                                   user_pattern_size);
                        addr = (void *)(((uintptr_t)addr) + user_pattern_size);
                        rte_memcpy(addr, flow_items, elt * sizeof(*item));
                        addr = (void *)(((uintptr_t)addr) +
                                        elt * sizeof(*item));
                }
        }
        return lsize;
}

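/*
 * Usage sketch (illustrative; the 2048-byte buffer size is an assumption
 * typical of callers, not mandated by the function): reserve a fixed
 * buffer and check the size the expansion actually required:
 *
 *     union {
 *             struct mlx5_flow_expand_rss buf;
 *             uint8_t buffer[2048];
 *     } expand_buffer;
 *     int ret = mlx5_flow_expand_rss(&expand_buffer.buf,
 *                                    sizeof(expand_buffer.buffer),
 *                                    items, rss_types,
 *                                    mlx5_support_expansion,
 *                                    MLX5_EXPANSION_ROOT);
 *     if (ret < 0 || ret > (int)sizeof(expand_buffer.buffer))
 *             return -rte_errno; // graph too deep or buffer too small
 */
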
enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
        MLX5_EXPANSION_ROOT_ETH_VLAN,
        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
        MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
        MLX5_EXPANSION_OUTER_IPV4_TCP,
        MLX5_EXPANSION_OUTER_IPV6,
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
        MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
        MLX5_EXPANSION_IPV4_TCP,
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct mlx5_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
                                                  MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                                                (MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6,
                                                  MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                  MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                  MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                  MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                  MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                  MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = MLX5_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                  MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
};

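/*
 * Illustrative walk through the graph above: a user pattern
 * ETH / IPV4 / END with types = ETH_RSS_NONFRAG_IPV4_UDP enters at
 * MLX5_EXPANSION_ROOT, follows ETH -> IPV4, and emits the additional
 * flow ETH / IPV4 / UDP / END, because MLX5_EXPANSION_IPV4_UDP is the
 * successor carrying the matching rss_types bit.
 */
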
static struct rte_flow_shared_action *
mlx5_shared_action_create(struct rte_eth_dev *dev,
                          const struct rte_flow_shared_action_conf *conf,
                          const struct rte_flow_action *action,
                          struct rte_flow_error *error);
static int mlx5_shared_action_destroy
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_shared_action *shared_action,
                                 struct rte_flow_error *error);
static int mlx5_shared_action_update
                                (struct rte_eth_dev *dev,
                                 struct rte_flow_shared_action *shared_action,
                                 const struct rte_flow_action *action,
                                 struct rte_flow_error *error);
static int mlx5_shared_action_query
                                (struct rte_eth_dev *dev,
                                 const struct rte_flow_shared_action *action,
                                 void *data,
                                 struct rte_flow_error *error);
static inline bool
mlx5_flow_tunnel_validate(struct rte_eth_dev *dev,
                          struct rte_flow_tunnel *tunnel,
                          const char **err_msg)
{
        *err_msg = NULL;
        if (!is_tunnel_offload_active(dev)) {
                *err_msg = "tunnel offload was not activated";
                goto out;
        } else if (!tunnel) {
                *err_msg = "no application tunnel";
                goto out;
        }

        switch (tunnel->type) {
        default:
                *err_msg = "unsupported tunnel type";
                goto out;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                break;
        }

out:
        return !*err_msg;
}


static int
mlx5_flow_tunnel_decap_set(struct rte_eth_dev *dev,
                    struct rte_flow_tunnel *app_tunnel,
                    struct rte_flow_action **actions,
                    uint32_t *num_of_actions,
                    struct rte_flow_error *error)
{
        int ret;
        struct mlx5_flow_tunnel *tunnel;
        const char *err_msg = NULL;
        bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

        if (!verdict)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          err_msg);
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "failed to initialize pmd tunnel");
        }
        *actions = &tunnel->action;
        *num_of_actions = 1;
        return 0;
}

static int
mlx5_flow_tunnel_match(struct rte_eth_dev *dev,
                       struct rte_flow_tunnel *app_tunnel,
                       struct rte_flow_item **items,
                       uint32_t *num_of_items,
                       struct rte_flow_error *error)
{
        int ret;
        struct mlx5_flow_tunnel *tunnel;
        const char *err_msg = NULL;
        bool verdict = mlx5_flow_tunnel_validate(dev, app_tunnel, &err_msg);

        if (!verdict)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          err_msg);
        ret = mlx5_get_flow_tunnel(dev, app_tunnel, &tunnel);
        if (ret < 0) {
                return rte_flow_error_set(error, ret,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "failed to initialize pmd tunnel");
        }
        *items = &tunnel->item;
        *num_of_items = 1;
        return 0;
}

static int
mlx5_flow_item_release(struct rte_eth_dev *dev,
                       struct rte_flow_item *pmd_items,
                       uint32_t num_items, struct rte_flow_error *err)
{
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct mlx5_flow_tunnel *tun;

        LIST_FOREACH(tun, &thub->tunnels, chain) {
                if (&tun->item == pmd_items)
                        break;
        }
        if (!tun || num_items != 1)
                return rte_flow_error_set(err, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "invalid argument");
        if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
                mlx5_flow_tunnel_free(dev, tun);
        return 0;
}

static int
mlx5_flow_action_release(struct rte_eth_dev *dev,
                         struct rte_flow_action *pmd_actions,
                         uint32_t num_actions, struct rte_flow_error *err)
{
        struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
        struct mlx5_flow_tunnel *tun;

        LIST_FOREACH(tun, &thub->tunnels, chain) {
                if (&tun->action == pmd_actions)
                        break;
        }
        if (!tun || num_actions != 1)
                return rte_flow_error_set(err, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                          "invalid argument");
        if (!__atomic_sub_fetch(&tun->refctn, 1, __ATOMIC_RELAXED))
                mlx5_flow_tunnel_free(dev, tun);

        return 0;
}

static int
mlx5_flow_tunnel_get_restore_info(struct rte_eth_dev *dev,
                                  struct rte_mbuf *m,
                                  struct rte_flow_restore_info *info,
                                  struct rte_flow_error *err)
{
        uint64_t ol_flags = m->ol_flags;
        const struct mlx5_flow_tbl_data_entry *tble;
        const uint64_t mask = PKT_RX_FDIR | PKT_RX_FDIR_ID;

        if ((ol_flags & mask) != mask)
                goto err;
        tble = tunnel_mark_decode(dev, m->hash.fdir.hi);
        if (!tble) {
                DRV_LOG(DEBUG, "port %u invalid miss tunnel mark %#x",
                        dev->data->port_id, m->hash.fdir.hi);
                goto err;
        }
        MLX5_ASSERT(tble->tunnel);
        memcpy(&info->tunnel, &tble->tunnel->app_tunnel, sizeof(info->tunnel));
        info->group_id = tble->group_id;
        info->flags = RTE_FLOW_RESTORE_INFO_TUNNEL |
                      RTE_FLOW_RESTORE_INFO_GROUP_ID |
                      RTE_FLOW_RESTORE_INFO_ENCAPSULATED;

        return 0;

err:
        return rte_flow_error_set(err, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                  "failed to get restore info");
}

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
        .query = mlx5_flow_query,
        .dev_dump = mlx5_flow_dev_dump,
        .get_aged_flows = mlx5_flow_get_aged_flows,
        .shared_action_create = mlx5_shared_action_create,
        .shared_action_destroy = mlx5_shared_action_destroy,
        .shared_action_update = mlx5_shared_action_update,
        .shared_action_query = mlx5_shared_action_query,
        .tunnel_decap_set = mlx5_flow_tunnel_decap_set,
        .tunnel_match = mlx5_flow_tunnel_match,
        .tunnel_action_decap_release = mlx5_flow_action_release,
        .tunnel_item_release = mlx5_flow_item_release,
        .get_restore_info = mlx5_flow_tunnel_get_restore_info,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
        struct rte_flow_attr attr;
        struct rte_flow_item items[4];
        struct rte_flow_item_eth l2;
        struct rte_flow_item_eth l2_mask;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3_mask;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4_mask;
        struct rte_flow_action actions[2];
        struct rte_flow_action_queue queue;
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
        uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GENEVE,
                .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GRE,
                .ptype = RTE_PTYPE_TUNNEL_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_NVGRE,
                .ptype = RTE_PTYPE_TUNNEL_NVGRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPIP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GTP,
                .ptype = RTE_PTYPE_TUNNEL_GTPU,
        },
};

/* Key of thread specific flow workspace data. */
static pthread_key_t key_workspace;

/* Once-initialization guard for the thread-specific flow workspace data. */
static pthread_once_t key_workspace_init;


/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of any.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     enum mlx5_feature_name feature,
                     uint32_t id,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;

        switch (feature) {
        case MLX5_HAIRPIN_RX:
                return REG_B;
        case MLX5_HAIRPIN_TX:
                return REG_A;
        case MLX5_METADATA_RX:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_B;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_METADATA_TX:
                return REG_A;
        case MLX5_METADATA_FDB:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_FLOW_MARK:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NON;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_1;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_0;
                }
                break;
        case MLX5_MTR_SFX:
                /*
                 * If meter color and flow match share one register, flow match
                 * should use the meter color register for match.
                 */
                if (priv->mtr_reg_share)
                        return priv->mtr_color_reg;
                else
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
                MLX5_ASSERT(priv->mtr_color_reg != REG_NON);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
                 * The metadata COPY_MARK register is used only in the meter
                 * suffix sub-flow when a meter is present, so it is safe to
                 * share the register.
                 */
                return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
        case MLX5_APP_TAG:
                /*
                 * If the meter is enabled, it engages a register for color
                 * match and flow match. If the meter color match does not
                 * use REG_C_2, the REG_C_x used by the meter color match
                 * must be skipped.
                 * If the meter is disabled, all available registers are
                 * free to use.
                 */
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
                if (id > (REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
                if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NON)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                /*
                 * This case means the meter is using a REG_C_x greater
                 * than 2. Take care not to conflict with the meter color
                 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
                 * the color register.
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (id >= (REG_C_7 - start_reg))
                                return rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                        NULL, "invalid tag id");
                        if (config->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NON)
                                return config->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
}

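/*
 * Usage sketch (illustrative): resolving the register backing the MARK
 * action; with dv_xmeta_en == MLX5_XMETA_MODE_META16 this yields REG_C_1
 * per the switch above:
 *
 *     int reg = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, &error);
 *     if (reg < 0)
 *             return reg; // rte_errno already set
 */
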
/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;

        /*
         * Having available reg_c can be regarded inclusively as supporting
         * extensive flow metadata registers, which could mean:
         * - metadata register copy action by modify header.
         * - 16 modify header actions are supported.
         * - reg_c's are preserved across different domains (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
        return config->flow_mreg_c[2] != REG_NON;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[in] range_accepted
 *   True if range of values is accepted for specific fields, false otherwise.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
                          bool range_accepted,
                          struct rte_flow_error *error)
{
        unsigned int i;

        MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "mask enables non supported"
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last && !range_accepted) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
                int ret;

                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
                        last[i] = ((const uint8_t *)item->last)[i] & mask[i];
                }
                ret = memcmp(spec, last, size);
                if (ret != 0)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "range is not valid");
        }
        return 0;
}

/**
 * Adjust the hash fields according to the @p rss_desc information.
 *
 * @param[in] rss_desc
 *   Pointer to the mlx5 RSS descriptor.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        int rss_request_inner = rss_desc->level >= 2;

        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
                hash_fields |= IBV_RX_HASH_INNER;
        else if (tunnel || rss_request_inner)
                return 0;
#endif
        /* Check if requested layer matches RSS hash fields. */
        if (!(rss_desc->types & layer_types))
                return 0;
        return hash_fields;
}

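/*
 * Worked example (illustrative): with rss_desc->level = 2 and
 * rss_desc->types = ETH_RSS_NONFRAG_IPV4_UDP, the tunnel sub-flow asking
 * for the IPv4/UDP hash gets IBV_RX_HASH_INNER OR-ed into its fields,
 * while a non-tunnel sub-flow of the same rule gets 0 and is not hashed.
 */
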
/**
 * Look up and set the tunnel ptype in the Rx queue data. Only a single
 * ptype can be used; if several tunnel rules are attached to this queue,
 * the tunnel ptype is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        unsigned int i;
        uint32_t tunnel_ptype = 0;

        /* Look up the ptype to use. */
        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
                if (!rxq_ctrl->flow_tunnels_n[i])
                        continue;
                if (!tunnel_ptype) {
                        tunnel_ptype = tunnels_info[i].ptype;
                } else {
                        tunnel_ptype = 0;
                        break;
                }
        }
        rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the
 * device flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                /*
                 * To support metadata register copy on Tx loopback,
                 * this must always be enabled (metadata may arrive
                 * from another port - not only from local flows).
                 */
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n++;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->flow_mark_n--;
                        rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl;
                unsigned int j;

                if (!(*priv->rxqs)[i])
                        continue;
                rxq_ctrl = container_of((*priv->rxqs)[i],
                                        struct mlx5_rxq_ctrl, rxq);
                rxq_ctrl->flow_mark_n = 0;
                rxq_ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq_ctrl->flow_tunnels_n[j] = 0;
                rxq_ctrl->rxq.tunnel = 0;
        }
}

1313 /**
1314  * Set the Rx queue dynamic metadata (mask and offset) in all Rx queues.
1315  *
1316  * @param[in] dev
1317  *   Pointer to the Ethernet device structure.
1318  */
1319 void
1320 mlx5_flow_rxq_dynf_metadata_set(struct rte_eth_dev *dev)
1321 {
1322         struct mlx5_priv *priv = dev->data->dev_private;
1323         struct mlx5_rxq_data *data;
1324         unsigned int i;
1325
1326         for (i = 0; i != priv->rxqs_n; ++i) {
1327                 if (!(*priv->rxqs)[i])
1328                         continue;
1329                 data = (*priv->rxqs)[i];
1330                 if (!rte_flow_dynf_metadata_avail()) {
1331                         data->dynf_meta = 0;
1332                         data->flow_meta_mask = 0;
1333                         data->flow_meta_offset = -1;
1334                 } else {
1335                         data->dynf_meta = 1;
1336                         data->flow_meta_mask = rte_flow_dynf_metadata_mask;
1337                         data->flow_meta_offset = rte_flow_dynf_metadata_offs;
1338                 }
1339         }
1340 }
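
/*
 * Editor's sketch, not part of the original file: for the queues above to
 * receive a valid mask/offset, the application must register the dynamic
 * metadata field before starting the device. The helper name below is
 * hypothetical; rte_flow_dynf_metadata_register() is the real DPDK API.
 */
static int __rte_unused
example_enable_flow_metadata(void)
{
	/* Reserve the dynamic mbuf field/flag for rte_flow metadata. */
	if (rte_flow_dynf_metadata_register() < 0)
		return -rte_errno;
	/* From now on rte_flow_dynf_metadata_avail() above returns true. */
	return 0;
}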
1341
1342 /**
1343  * Return a pointer to the desired action in the list of actions.
1344  *
1345  * @param[in] actions
1346  *   The list of actions to search the action in.
1347  * @param[in] action
1348  *   The action to find.
1349  *
1350  * @return
1351  *   Pointer to the action in the list, if found. NULL otherwise.
1352  */
1353 const struct rte_flow_action *
1354 mlx5_flow_find_action(const struct rte_flow_action *actions,
1355                       enum rte_flow_action_type action)
1356 {
1357         if (actions == NULL)
1358                 return NULL;
1359         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
1360                 if (actions->type == action)
1361                         return actions;
1362         return NULL;
1363 }
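
/*
 * Editor's sketch, not part of the original file: typical use of
 * mlx5_flow_find_action() to pull the RSS configuration out of an
 * END-terminated action array; example_get_rss_conf() is hypothetical.
 */
static const struct rte_flow_action_rss * __rte_unused
example_get_rss_conf(const struct rte_flow_action *actions)
{
	const struct rte_flow_action *act =
		mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS);

	/* act->conf points to the rte_flow_action_rss the caller provided. */
	return act ? act->conf : NULL;
}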
1364
1365 /**
1366  * Validate the flag action.
1367  *
1368  * @param[in] action_flags
1369  *   Bit-fields that hold the actions detected until now.
1370  * @param[in] attr
1371  *   Attributes of flow that includes this action.
1372  * @param[out] error
1373  *   Pointer to error structure.
1374  *
1375  * @return
1376  *   0 on success, a negative errno value otherwise and rte_errno is set.
1377  */
1378 int
1379 mlx5_flow_validate_action_flag(uint64_t action_flags,
1380                                const struct rte_flow_attr *attr,
1381                                struct rte_flow_error *error)
1382 {
1383         if (action_flags & MLX5_FLOW_ACTION_MARK)
1384                 return rte_flow_error_set(error, EINVAL,
1385                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1386                                           "can't mark and flag in same flow");
1387         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1388                 return rte_flow_error_set(error, EINVAL,
1389                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1390                                           "can't have 2 flag"
1391                                           " actions in same flow");
1392         if (attr->egress)
1393                 return rte_flow_error_set(error, ENOTSUP,
1394                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1395                                           "flag action not supported for "
1396                                           "egress");
1397         return 0;
1398 }
1399
1400 /**
1401  * Validate the mark action.
1402  *
1403  * @param[in] action
1404  *   Pointer to the mark action.
1405  * @param[in] action_flags
1406  *   Bit-fields that hold the actions detected until now.
1407  * @param[in] attr
1408  *   Attributes of flow that includes this action.
1409  * @param[out] error
1410  *   Pointer to error structure.
1411  *
1412  * @return
1413  *   0 on success, a negative errno value otherwise and rte_errno is set.
1414  */
1415 int
1416 mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
1417                                uint64_t action_flags,
1418                                const struct rte_flow_attr *attr,
1419                                struct rte_flow_error *error)
1420 {
1421         const struct rte_flow_action_mark *mark = action->conf;
1422
1423         if (!mark)
1424                 return rte_flow_error_set(error, EINVAL,
1425                                           RTE_FLOW_ERROR_TYPE_ACTION,
1426                                           action,
1427                                           "configuration cannot be null");
1428         if (mark->id >= MLX5_FLOW_MARK_MAX)
1429                 return rte_flow_error_set(error, EINVAL,
1430                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1431                                           &mark->id,
1432                                           "mark id must be in 0 <= id < "
1433                                           RTE_STR(MLX5_FLOW_MARK_MAX));
1434         if (action_flags & MLX5_FLOW_ACTION_FLAG)
1435                 return rte_flow_error_set(error, EINVAL,
1436                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1437                                           "can't flag and mark in same flow");
1438         if (action_flags & MLX5_FLOW_ACTION_MARK)
1439                 return rte_flow_error_set(error, EINVAL,
1440                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1441                                           "can't have 2 mark actions in same"
1442                                           " flow");
1443         if (attr->egress)
1444                 return rte_flow_error_set(error, ENOTSUP,
1445                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1446                                           "mark action not supported for "
1447                                           "egress");
1448         return 0;
1449 }
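
/*
 * Editor's sketch, not part of the original file: the validators above make
 * MARK and FLAG mutually exclusive, so a caller accumulates action_flags
 * while walking the action list and passes it back in. The helper name and
 * mark id are hypothetical.
 */
static int __rte_unused
example_validate_mark(const struct rte_flow_attr *attr,
		      uint64_t *action_flags,
		      struct rte_flow_error *error)
{
	static const struct rte_flow_action_mark conf = {
		.id = 42,	/* Must stay below MLX5_FLOW_MARK_MAX. */
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_MARK,
		.conf = &conf,
	};
	int ret;

	ret = mlx5_flow_validate_action_mark(&action, *action_flags,
					     attr, error);
	if (ret < 0)
		return ret;
	/*
	 * Record the action: with MLX5_FLOW_ACTION_MARK set, a later FLAG
	 * or a second MARK in the same flow fails validation with EINVAL.
	 */
	*action_flags |= MLX5_FLOW_ACTION_MARK;
	return 0;
}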
1450
1451 /**
1452  * Validate the drop action.
1453  *
1454  * @param[in] action_flags
1455  *   Bit-fields that hold the actions detected until now.
1456  * @param[in] attr
1457  *   Attributes of flow that includes this action.
1458  * @param[out] error
1459  *   Pointer to error structure.
1460  *
1461  * @return
1462  *   0 on success, a negative errno value otherwise and rte_errno is set.
1463  */
1464 int
1465 mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
1466                                const struct rte_flow_attr *attr,
1467                                struct rte_flow_error *error)
1468 {
1469         if (attr->egress)
1470                 return rte_flow_error_set(error, ENOTSUP,
1471                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1472                                           "drop action not supported for "
1473                                           "egress");
1474         return 0;
1475 }
1476
1477 /**
1478  * Validate the queue action.
1479  *
1480  * @param[in] action
1481  *   Pointer to the queue action.
1482  * @param[in] action_flags
1483  *   Bit-fields that hold the actions detected until now.
1484  * @param[in] dev
1485  *   Pointer to the Ethernet device structure.
1486  * @param[in] attr
1487  *   Attributes of flow that includes this action.
1488  * @param[out] error
1489  *   Pointer to error structure.
1490  *
1491  * @return
1492  *   0 on success, a negative errno value otherwise and rte_errno is set.
1493  */
1494 int
1495 mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
1496                                 uint64_t action_flags,
1497                                 struct rte_eth_dev *dev,
1498                                 const struct rte_flow_attr *attr,
1499                                 struct rte_flow_error *error)
1500 {
1501         struct mlx5_priv *priv = dev->data->dev_private;
1502         const struct rte_flow_action_queue *queue = action->conf;
1503
1504         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1505                 return rte_flow_error_set(error, EINVAL,
1506                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1507                                           "can't have 2 fate actions in"
1508                                           " same flow");
1509         if (!priv->rxqs_n)
1510                 return rte_flow_error_set(error, EINVAL,
1511                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1512                                           NULL, "No Rx queues configured");
1513         if (queue->index >= priv->rxqs_n)
1514                 return rte_flow_error_set(error, EINVAL,
1515                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1516                                           &queue->index,
1517                                           "queue index out of range");
1518         if (!(*priv->rxqs)[queue->index])
1519                 return rte_flow_error_set(error, EINVAL,
1520                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1521                                           &queue->index,
1522                                           "queue is not configured");
1523         if (attr->egress)
1524                 return rte_flow_error_set(error, ENOTSUP,
1525                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1526                                           "queue action not supported for "
1527                                           "egress");
1528         return 0;
1529 }
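
/*
 * Editor's sketch, not part of the original file: validating a QUEUE fate
 * action; the queue index must address a configured Rx queue. Index 0 and
 * the helper name are hypothetical, and action_flags is zero because no
 * fate action was seen yet.
 */
static int __rte_unused
example_validate_queue(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	static const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_QUEUE,
		.conf = &queue,
	};
	const struct rte_flow_attr attr = { .ingress = 1 };

	return mlx5_flow_validate_action_queue(&action, 0, dev, &attr, error);
}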
1530
1531 /**
1532  * Validate the RSS action configuration.
1533  *
1534  * @param[in] dev
1535  *   Pointer to the Ethernet device structure.
1536  * @param[in] action
1537  *   Pointer to the RSS action.
1538  * @param[out] error
1539  *   Pointer to error structure.
1540  *
1541  * @return
1542  *   0 on success, a negative errno value otherwise and rte_errno is set.
1543  */
1544 int
1545 mlx5_validate_action_rss(struct rte_eth_dev *dev,
1546                          const struct rte_flow_action *action,
1547                          struct rte_flow_error *error)
1548 {
1549         struct mlx5_priv *priv = dev->data->dev_private;
1550         const struct rte_flow_action_rss *rss = action->conf;
1551         unsigned int i;
1552
1553         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
1554             rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
1555                 return rte_flow_error_set(error, ENOTSUP,
1556                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1557                                           &rss->func,
1558                                           "RSS hash function not supported");
1559 #ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
1560         if (rss->level > 2)
1561 #else
1562         if (rss->level > 1)
1563 #endif
1564                 return rte_flow_error_set(error, ENOTSUP,
1565                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1566                                           &rss->level,
1567                                           "tunnel RSS is not supported");
1568         /* Allow RSS key_len 0 in case of NULL (default) RSS key. */
1569         if (rss->key_len == 0 && rss->key != NULL)
1570                 return rte_flow_error_set(error, ENOTSUP,
1571                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1572                                           &rss->key_len,
1573                                           "RSS hash key length 0");
1574         if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
1575                 return rte_flow_error_set(error, ENOTSUP,
1576                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1577                                           &rss->key_len,
1578                                           "RSS hash key too small");
1579         if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
1580                 return rte_flow_error_set(error, ENOTSUP,
1581                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1582                                           &rss->key_len,
1583                                           "RSS hash key too large");
1584         if (rss->queue_num > priv->config.ind_table_max_size)
1585                 return rte_flow_error_set(error, ENOTSUP,
1586                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1587                                           &rss->queue_num,
1588                                           "number of queues too large");
1589         if (rss->types & MLX5_RSS_HF_MASK)
1590                 return rte_flow_error_set(error, ENOTSUP,
1591                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1592                                           &rss->types,
1593                                           "some RSS protocols are not"
1594                                           " supported");
1595         if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
1596             !(rss->types & ETH_RSS_IP))
1597                 return rte_flow_error_set(error, EINVAL,
1598                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1599                                           "L3 partial RSS requested but L3 RSS"
1600                                           " type not specified");
1601         if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
1602             !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
1603                 return rte_flow_error_set(error, EINVAL,
1604                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1605                                           "L4 partial RSS requested but L4 RSS"
1606                                           " type not specified");
1607         if (!priv->rxqs_n)
1608                 return rte_flow_error_set(error, EINVAL,
1609                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1610                                           NULL, "No Rx queues configured");
1611         if (!rss->queue_num)
1612                 return rte_flow_error_set(error, EINVAL,
1613                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1614                                           NULL, "No queues configured");
1615         for (i = 0; i != rss->queue_num; ++i) {
1616                 if (rss->queue[i] >= priv->rxqs_n)
1617                         return rte_flow_error_set
1618                                 (error, EINVAL,
1619                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1620                                  &rss->queue[i], "queue index out of range");
1621                 if (!(*priv->rxqs)[rss->queue[i]])
1622                         return rte_flow_error_set
1623                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1624                                  &rss->queue[i], "queue is not configured");
1625         }
1626         return 0;
1627 }
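
/*
 * Editor's sketch, not part of the original file: an RSS configuration that
 * satisfies the checks above - Toeplitz hashing, a full 40-byte key, outer
 * (level 1) hashing and supported IP/TCP types. Key bytes and queue ids are
 * hypothetical.
 */
static int __rte_unused
example_validate_rss_conf(struct rte_eth_dev *dev,
			  struct rte_flow_error *error)
{
	static const uint8_t key[MLX5_RSS_HASH_KEY_LEN] = { 0x6d, 0x5a };
	static const uint16_t queues[] = { 0, 1 };
	const struct rte_flow_action_rss rss = {
		.func = RTE_ETH_HASH_FUNCTION_TOEPLITZ,
		.level = 1,			/* Outer headers only. */
		.types = ETH_RSS_IP | ETH_RSS_TCP,
		.key_len = MLX5_RSS_HASH_KEY_LEN, /* Neither 0 nor partial. */
		.key = key,
		.queue_num = RTE_DIM(queues),	/* All indexes must exist. */
		.queue = queues,
	};
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_RSS,
		.conf = &rss,
	};

	return mlx5_validate_action_rss(dev, &action, error);
}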
1628
1629 /**
1630  * Validate the RSS action in the context of the whole flow.
1631  *
1632  * @param[in] action
1633  *   Pointer to the RSS action.
1634  * @param[in] action_flags
1635  *   Bit-fields that hold the actions detected until now.
1636  * @param[in] dev
1637  *   Pointer to the Ethernet device structure.
1638  * @param[in] attr
1639  *   Attributes of flow that includes this action.
1640  * @param[in] item_flags
1641  *   Items that were detected.
1642  * @param[out] error
1643  *   Pointer to error structure.
1644  *
1645  * @return
1646  *   0 on success, a negative errno value otherwise and rte_errno is set.
1647  */
1648 int
1649 mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
1650                               uint64_t action_flags,
1651                               struct rte_eth_dev *dev,
1652                               const struct rte_flow_attr *attr,
1653                               uint64_t item_flags,
1654                               struct rte_flow_error *error)
1655 {
1656         const struct rte_flow_action_rss *rss = action->conf;
1657         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1658         int ret;
1659
1660         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1663                                           "can't have 2 fate actions"
1664                                           " in same flow");
1665         ret = mlx5_validate_action_rss(dev, action, error);
1666         if (ret)
1667                 return ret;
1668         if (attr->egress)
1669                 return rte_flow_error_set(error, ENOTSUP,
1670                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1671                                           "rss action not supported for "
1672                                           "egress");
1673         if (rss->level > 1 && !tunnel)
1674                 return rte_flow_error_set(error, EINVAL,
1675                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1676                                           "inner RSS is not supported for "
1677                                           "non-tunnel flows");
1678         if ((item_flags & MLX5_FLOW_LAYER_ECPRI) &&
1679             !(item_flags & MLX5_FLOW_LAYER_INNER_L4_UDP)) {
1680                 return rte_flow_error_set(error, EINVAL,
1681                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
1682                                           "RSS on eCPRI is not supported now");
1683         }
1684         return 0;
1685 }
1686
1687 /**
1688  * Validate the default miss action.
1689  *
1690  * @param[in] action_flags
1691  *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
1692  * @param[out] error
1693  *   Pointer to error structure.
1694  *
1695  * @return
1696  *   0 on success, a negative errno value otherwise and rte_errno is set.
1697  */
1698 int
1699 mlx5_flow_validate_action_default_miss(uint64_t action_flags,
1700                                 const struct rte_flow_attr *attr,
1701                                 struct rte_flow_error *error)
1702 {
1703         if (action_flags & MLX5_FLOW_FATE_ACTIONS)
1704                 return rte_flow_error_set(error, EINVAL,
1705                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1706                                           "can't have 2 fate actions in"
1707                                           " same flow");
1708         if (attr->egress)
1709                 return rte_flow_error_set(error, ENOTSUP,
1710                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1711                                           "default miss action not supported "
1712                                           "for egress");
1713         if (attr->group)
1714                 return rte_flow_error_set(error, ENOTSUP,
1715                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
1716                                           "only group 0 is supported");
1717         if (attr->transfer)
1718                 return rte_flow_error_set(error, ENOTSUP,
1719                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1720                                           NULL, "transfer is not supported");
1721         return 0;
1722 }
1723
1724 /**
1725  * Validate the count action.
1726  *
1727  * @param[in] dev
1728  *   Pointer to the Ethernet device structure.
1729  * @param[in] attr
1730  *   Attributes of flow that includes this action.
1731  * @param[out] error
1732  *   Pointer to error structure.
1733  *
1734  * @return
1735  *   0 on success, a negative errno value otherwise and rte_errno is set.
1736  */
1737 int
1738 mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
1739                                 const struct rte_flow_attr *attr,
1740                                 struct rte_flow_error *error)
1741 {
1742         if (attr->egress)
1743                 return rte_flow_error_set(error, ENOTSUP,
1744                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1745                                           "count action not supported for "
1746                                           "egress");
1747         return 0;
1748 }
1749
1750 /**
1751  * Verify the @p attributes will be correctly understood by the NIC and store
1752  * them in the @p flow if everything is correct.
1753  *
1754  * @param[in] dev
1755  *   Pointer to the Ethernet device structure.
1756  * @param[in] attributes
1757  *   Pointer to flow attributes
1758  * @param[out] error
1759  *   Pointer to error structure.
1760  *
1761  * @return
1762  *   0 on success, a negative errno value otherwise and rte_errno is set.
1763  */
1764 int
1765 mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
1766                               const struct rte_flow_attr *attributes,
1767                               struct rte_flow_error *error)
1768 {
1769         struct mlx5_priv *priv = dev->data->dev_private;
1770         uint32_t priority_max = priv->config.flow_prio - 1;
1771
1772         if (attributes->group)
1773                 return rte_flow_error_set(error, ENOTSUP,
1774                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1775                                           NULL, "groups are not supported");
1776         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1777             attributes->priority >= priority_max)
1778                 return rte_flow_error_set(error, ENOTSUP,
1779                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1780                                           NULL, "priority out of range");
1781         if (attributes->egress)
1782                 return rte_flow_error_set(error, ENOTSUP,
1783                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
1784                                           "egress is not supported");
1785         if (attributes->transfer && !priv->config.dv_esw_en)
1786                 return rte_flow_error_set(error, ENOTSUP,
1787                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1788                                           NULL, "transfer is not supported");
1789         if (!attributes->ingress)
1790                 return rte_flow_error_set(error, EINVAL,
1791                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
1792                                           NULL,
1793                                           "ingress attribute is mandatory");
1794         return 0;
1795 }
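
/*
 * Editor's sketch, not part of the original file: the attribute set accepted
 * by the validator above is plain ingress in group 0 with an in-range
 * priority; the helper name is hypothetical.
 */
static int __rte_unused
example_validate_attr(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = {
		.group = 0,	/* Non-zero groups are rejected here. */
		.priority = 0,	/* Must stay below config.flow_prio. */
		.ingress = 1,	/* Mandatory; egress is rejected. */
	};

	return mlx5_flow_validate_attributes(dev, &attr, error);
}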
1796
1797 /**
1798  * Validate ICMP6 item.
1799  *
1800  * @param[in] item
1801  *   Item specification.
1802  * @param[in] item_flags
1803  *   Bit-fields that hold the items detected until now.
1804  * @param[in] target_protocol
1805  *   The next protocol in the previous item.
1806  * @param[out] error
1807  *   Pointer to error structure.
1808  *
1809  * @return
1810  *   0 on success, a negative errno value otherwise and rte_errno is set.
1811  */
1812 int
1813 mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
1814                                uint64_t item_flags,
1815                                uint8_t target_protocol,
1816                                struct rte_flow_error *error)
1817 {
1818         const struct rte_flow_item_icmp6 *mask = item->mask;
1819         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1820         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1821                                       MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1822         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1823                                       MLX5_FLOW_LAYER_OUTER_L4;
1824         int ret;
1825
1826         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
1827                 return rte_flow_error_set(error, EINVAL,
1828                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1829                                           "protocol filtering not compatible"
1830                                           " with ICMP6 layer");
1831         if (!(item_flags & l3m))
1832                 return rte_flow_error_set(error, EINVAL,
1833                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1834                                           "IPv6 is mandatory to filter on"
1835                                           " ICMP6");
1836         if (item_flags & l4m)
1837                 return rte_flow_error_set(error, EINVAL,
1838                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1839                                           "multiple L4 layers not supported");
1840         if (!mask)
1841                 mask = &rte_flow_item_icmp6_mask;
1842         ret = mlx5_flow_item_acceptable
1843                 (item, (const uint8_t *)mask,
1844                  (const uint8_t *)&rte_flow_item_icmp6_mask,
1845                  sizeof(struct rte_flow_item_icmp6),
1846                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1847         if (ret < 0)
1848                 return ret;
1849         return 0;
1850 }
1851
1852 /**
1853  * Validate ICMP item.
1854  *
1855  * @param[in] item
1856  *   Item specification.
1857  * @param[in] item_flags
1858  *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1859  * @param[out] error
1860  *   Pointer to error structure.
1861  *
1862  * @return
1863  *   0 on success, a negative errno value otherwise and rte_errno is set.
1864  */
1865 int
1866 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1867                              uint64_t item_flags,
1868                              uint8_t target_protocol,
1869                              struct rte_flow_error *error)
1870 {
1871         const struct rte_flow_item_icmp *mask = item->mask;
1872         const struct rte_flow_item_icmp nic_mask = {
1873                 .hdr.icmp_type = 0xff,
1874                 .hdr.icmp_code = 0xff,
1875                 .hdr.icmp_ident = RTE_BE16(0xffff),
1876                 .hdr.icmp_seq_nb = RTE_BE16(0xffff),
1877         };
1878         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1879         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1880                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1881         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1882                                       MLX5_FLOW_LAYER_OUTER_L4;
1883         int ret;
1884
1885         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1886                 return rte_flow_error_set(error, EINVAL,
1887                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1888                                           "protocol filtering not compatible"
1889                                           " with ICMP layer");
1890         if (!(item_flags & l3m))
1891                 return rte_flow_error_set(error, EINVAL,
1892                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1893                                           "IPv4 is mandatory to filter"
1894                                           " on ICMP");
1895         if (item_flags & l4m)
1896                 return rte_flow_error_set(error, EINVAL,
1897                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1898                                           "multiple L4 layers not supported");
1899         if (!mask)
1900                 mask = &nic_mask;
1901         ret = mlx5_flow_item_acceptable
1902                 (item, (const uint8_t *)mask,
1903                  (const uint8_t *)&nic_mask,
1904                  sizeof(struct rte_flow_item_icmp),
1905                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1906         if (ret < 0)
1907                 return ret;
1908         return 0;
1909 }
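
/*
 * Editor's sketch, not part of the original file: an ICMP item is accepted
 * only on top of an IPv4 layer, so item_flags must already carry that bit.
 * The echo-request match below is hypothetical.
 */
static int __rte_unused
example_validate_icmp(struct rte_flow_error *error)
{
	const struct rte_flow_item_icmp icmp = {
		.hdr.icmp_type = 8,	/* ICMP echo request. */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ICMP,
		.spec = &icmp,
		.mask = &rte_flow_item_icmp_mask,
	};

	/* Outer IPv4 already matched, next protocol wildcarded (0xFF). */
	return mlx5_flow_validate_item_icmp(&item,
					    MLX5_FLOW_LAYER_OUTER_L3_IPV4,
					    0xFF, error);
}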
1910
1911 /**
1912  * Validate Ethernet item.
1913  *
1914  * @param[in] item
1915  *   Item specification.
1916  * @param[in] item_flags
1917  *   Bit-fields that hold the items detected until now.
 * @param[in] ext_vlan_sup
 *   Whether extended VLAN features are supported or not.
1918  * @param[out] error
1919  *   Pointer to error structure.
1920  *
1921  * @return
1922  *   0 on success, a negative errno value otherwise and rte_errno is set.
1923  */
1924 int
1925 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1926                             uint64_t item_flags, bool ext_vlan_sup,
1927                             struct rte_flow_error *error)
1928 {
1929         const struct rte_flow_item_eth *mask = item->mask;
1930         const struct rte_flow_item_eth nic_mask = {
1931                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1932                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1933                 .type = RTE_BE16(0xffff),
1934                 .has_vlan = ext_vlan_sup ? 1 : 0,
1935         };
1936         int ret;
1937         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1938         const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1939                                        MLX5_FLOW_LAYER_OUTER_L2;
1940
1941         if (item_flags & ethm)
1942                 return rte_flow_error_set(error, ENOTSUP,
1943                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1944                                           "multiple L2 layers not supported");
1945         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1946             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1947                 return rte_flow_error_set(error, EINVAL,
1948                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1949                                           "L2 layer should not follow "
1950                                           "L3 layers");
1951         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1952             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1953                 return rte_flow_error_set(error, EINVAL,
1954                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1955                                           "L2 layer should not follow VLAN");
1956         if (!mask)
1957                 mask = &rte_flow_item_eth_mask;
1958         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1959                                         (const uint8_t *)&nic_mask,
1960                                         sizeof(struct rte_flow_item_eth),
1961                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
1962         return ret;
1963 }
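
/*
 * Editor's sketch, not part of the original file: matching a unicast
 * destination MAC with the default full mask; the address is hypothetical
 * and ext_vlan_sup is false as on the verbs path.
 */
static int __rte_unused
example_validate_eth(struct rte_flow_error *error)
{
	const struct rte_flow_item_eth eth = {
		.dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_ETH,
		.spec = &eth,
		.mask = &rte_flow_item_eth_mask,
	};

	/* First pattern item, hence no layer seen yet (item_flags == 0). */
	return mlx5_flow_validate_item_eth(&item, 0, false, error);
}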
1964
1965 /**
1966  * Validate VLAN item.
1967  *
1968  * @param[in] item
1969  *   Item specification.
1970  * @param[in] item_flags
1971  *   Bit-fields that hold the items detected until now.
1972  * @param[in] dev
1973  *   Ethernet device flow is being created on.
1974  * @param[out] error
1975  *   Pointer to error structure.
1976  *
1977  * @return
1978  *   0 on success, a negative errno value otherwise and rte_errno is set.
1979  */
1980 int
1981 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1982                              uint64_t item_flags,
1983                              struct rte_eth_dev *dev,
1984                              struct rte_flow_error *error)
1985 {
1986         const struct rte_flow_item_vlan *spec = item->spec;
1987         const struct rte_flow_item_vlan *mask = item->mask;
1988         const struct rte_flow_item_vlan nic_mask = {
1989                 .tci = RTE_BE16(UINT16_MAX),
1990                 .inner_type = RTE_BE16(UINT16_MAX),
1991         };
1992         uint16_t vlan_tag = 0;
1993         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1994         int ret;
1995         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1996                                         MLX5_FLOW_LAYER_INNER_L4) :
1997                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1998                                         MLX5_FLOW_LAYER_OUTER_L4);
1999         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2000                                         MLX5_FLOW_LAYER_OUTER_VLAN;
2001
2002         if (item_flags & vlanm)
2003                 return rte_flow_error_set(error, EINVAL,
2004                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2005                                           "multiple VLAN layers not supported");
2006         else if ((item_flags & l34m) != 0)
2007                 return rte_flow_error_set(error, EINVAL,
2008                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2009                                           "VLAN cannot follow L3/L4 layer");
2010         if (!mask)
2011                 mask = &rte_flow_item_vlan_mask;
2012         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2013                                         (const uint8_t *)&nic_mask,
2014                                         sizeof(struct rte_flow_item_vlan),
2015                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2016         if (ret)
2017                 return ret;
2018         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
2019                 struct mlx5_priv *priv = dev->data->dev_private;
2020
2021                 if (priv->vmwa_context) {
2022                          * A non-NULL context means we have a virtual
2023                          * machine with SR-IOV enabled, and we have to
2024                          * create a VLAN interface to make the hypervisor
2025                          * set up the E-Switch vport context correctly.
2026                          * We avoid creating multiple VLAN interfaces,
                          * so we cannot support a VLAN tag mask.
2027                          * VLAN interfaces, so we cannot support VLAN tag mask.
2028                          */
2029                         return rte_flow_error_set(error, EINVAL,
2030                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2031                                                   item,
2032                                                   "VLAN tag mask is not"
2033                                                   " supported in virtual"
2034                                                   " environment");
2035                 }
2036         }
2037         if (spec) {
2038                 vlan_tag = spec->tci;
2039                 vlan_tag &= mask->tci;
2040         }
2041         /*
2042          * From the verbs perspective an empty VLAN is equivalent
2043          * to a packet without a VLAN layer.
2044          */
2045         if (!vlan_tag)
2046                 return rte_flow_error_set(error, EINVAL,
2047                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
2048                                           item->spec,
2049                                           "VLAN cannot be empty");
2050         return 0;
2051 }
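
/*
 * Editor's sketch, not part of the original file: a VLAN item needs a
 * non-empty TCI after masking, and on this path the TCI mask is expected to
 * cover exactly the VLAN id bits (0x0fff). VLAN id 100 is hypothetical.
 */
static int __rte_unused
example_validate_vlan(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	const struct rte_flow_item_vlan vlan = {
		.tci = RTE_BE16(100),		/* VLAN id 100. */
	};
	const struct rte_flow_item_vlan vlan_mask = {
		.tci = RTE_BE16(0x0fff),	/* Id bits only, no PCP/DEI. */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VLAN,
		.spec = &vlan,
		.mask = &vlan_mask,
	};

	/* An outer L2 item was already validated; no tunnel so far. */
	return mlx5_flow_validate_item_vlan(&item, MLX5_FLOW_LAYER_OUTER_L2,
					    dev, error);
}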
2052
2053 /**
2054  * Validate IPV4 item.
2055  *
2056  * @param[in] item
2057  *   Item specification.
2058  * @param[in] item_flags
2059  *   Bit-fields that hold the items detected until now.
2060  * @param[in] last_item
2061  *   Previous validated item in the pattern items.
2062  * @param[in] ether_type
2063  *   Type in the ethernet layer header (including dot1q).
2064  * @param[in] acc_mask
2065  *   Acceptable mask, if NULL the default internal mask
2066  *   will be used to check whether item fields are supported.
2067  * @param[in] range_accepted
2068  *   True if range of values is accepted for specific fields, false otherwise.
2069  * @param[out] error
2070  *   Pointer to error structure.
2071  *
2072  * @return
2073  *   0 on success, a negative errno value otherwise and rte_errno is set.
2074  */
2075 int
2076 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2077                              uint64_t item_flags,
2078                              uint64_t last_item,
2079                              uint16_t ether_type,
2080                              const struct rte_flow_item_ipv4 *acc_mask,
2081                              bool range_accepted,
2082                              struct rte_flow_error *error)
2083 {
2084         const struct rte_flow_item_ipv4 *mask = item->mask;
2085         const struct rte_flow_item_ipv4 *spec = item->spec;
2086         const struct rte_flow_item_ipv4 nic_mask = {
2087                 .hdr = {
2088                         .src_addr = RTE_BE32(0xffffffff),
2089                         .dst_addr = RTE_BE32(0xffffffff),
2090                         .type_of_service = 0xff,
2091                         .next_proto_id = 0xff,
2092                 },
2093         };
2094         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2095         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2096                                       MLX5_FLOW_LAYER_OUTER_L3;
2097         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2098                                       MLX5_FLOW_LAYER_OUTER_L4;
2099         int ret;
2100         uint8_t next_proto = 0xFF;
2101         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2102                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2103                                   MLX5_FLOW_LAYER_INNER_VLAN);
2104
2105         if ((last_item & l2_vlan) && ether_type &&
2106             ether_type != RTE_ETHER_TYPE_IPV4)
2107                 return rte_flow_error_set(error, EINVAL,
2108                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2109                                           "IPv4 cannot follow L2/VLAN layer "
2110                                           "whose ether type is not IPv4");
2111         if (item_flags & MLX5_FLOW_LAYER_IPIP) {
2112                 if (mask && spec)
2113                         next_proto = mask->hdr.next_proto_id &
2114                                      spec->hdr.next_proto_id;
2115                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2116                         return rte_flow_error_set(error, EINVAL,
2117                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2118                                                   item,
2119                                                   "multiple tunnel "
2120                                                   "not supported");
2121         }
2122         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
2123                 return rte_flow_error_set(error, EINVAL,
2124                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2125                                           "wrong tunnel type - IPv6 specified "
2126                                           "but IPv4 item provided");
2127         if (item_flags & l3m)
2128                 return rte_flow_error_set(error, ENOTSUP,
2129                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2130                                           "multiple L3 layers not supported");
2131         else if (item_flags & l4m)
2132                 return rte_flow_error_set(error, EINVAL,
2133                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2134                                           "L3 cannot follow an L4 layer.");
2135         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2136                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2137                 return rte_flow_error_set(error, EINVAL,
2138                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2139                                           "L3 cannot follow an NVGRE layer.");
2140         if (!mask)
2141                 mask = &rte_flow_item_ipv4_mask;
2142         else if (mask->hdr.next_proto_id != 0 &&
2143                  mask->hdr.next_proto_id != 0xff)
2144                 return rte_flow_error_set(error, EINVAL,
2145                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2146                                           "partial mask is not supported"
2147                                           " for protocol");
2148         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2149                                         acc_mask ? (const uint8_t *)acc_mask
2150                                                  : (const uint8_t *)&nic_mask,
2151                                         sizeof(struct rte_flow_item_ipv4),
2152                                         range_accepted, error);
2153         if (ret < 0)
2154                 return ret;
2155         return 0;
2156 }
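
/*
 * Editor's sketch, not part of the original file: matching a destination
 * /32 and the full next-protocol byte - the validator above rejects any
 * partial next_proto_id mask. Address and helper name are hypothetical.
 */
static int __rte_unused
example_validate_ipv4(struct rte_flow_error *error)
{
	const struct rte_flow_item_ipv4 ipv4 = {
		.hdr = {
			.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
			.next_proto_id = IPPROTO_UDP,
		},
	};
	const struct rte_flow_item_ipv4 ipv4_mask = {
		.hdr = {
			.dst_addr = RTE_BE32(0xffffffff),
			.next_proto_id = 0xff,	/* Only 0 or 0xff pass. */
		},
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_IPV4,
		.spec = &ipv4,
		.mask = &ipv4_mask,
	};

	/* Follows an outer Ethernet item whose ether type is IPv4. */
	return mlx5_flow_validate_item_ipv4(&item, MLX5_FLOW_LAYER_OUTER_L2,
					    MLX5_FLOW_LAYER_OUTER_L2,
					    RTE_ETHER_TYPE_IPV4, NULL,
					    MLX5_ITEM_RANGE_NOT_ACCEPTED,
					    error);
}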
2157
2158 /**
2159  * Validate IPV6 item.
2160  *
2161  * @param[in] item
2162  *   Item specification.
2163  * @param[in] item_flags
2164  *   Bit-fields that hold the items detected until now.
2165  * @param[in] last_item
2166  *   Previous validated item in the pattern items.
2167  * @param[in] ether_type
2168  *   Type in the ethernet layer header (including dot1q).
2169  * @param[in] acc_mask
2170  *   Acceptable mask, if NULL the default internal mask
2171  *   will be used to check whether item fields are supported.
2172  * @param[out] error
2173  *   Pointer to error structure.
2174  *
2175  * @return
2176  *   0 on success, a negative errno value otherwise and rte_errno is set.
2177  */
2178 int
2179 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
2180                              uint64_t item_flags,
2181                              uint64_t last_item,
2182                              uint16_t ether_type,
2183                              const struct rte_flow_item_ipv6 *acc_mask,
2184                              struct rte_flow_error *error)
2185 {
2186         const struct rte_flow_item_ipv6 *mask = item->mask;
2187         const struct rte_flow_item_ipv6 *spec = item->spec;
2188         const struct rte_flow_item_ipv6 nic_mask = {
2189                 .hdr = {
2190                         .src_addr =
2191                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2192                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2193                         .dst_addr =
2194                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2195                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2196                         .vtc_flow = RTE_BE32(0xffffffff),
2197                         .proto = 0xff,
2198                 },
2199         };
2200         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2201         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2202                                       MLX5_FLOW_LAYER_OUTER_L3;
2203         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2204                                       MLX5_FLOW_LAYER_OUTER_L4;
2205         int ret;
2206         uint8_t next_proto = 0xFF;
2207         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
2208                                   MLX5_FLOW_LAYER_OUTER_VLAN |
2209                                   MLX5_FLOW_LAYER_INNER_VLAN);
2210
2211         if ((last_item & l2_vlan) && ether_type &&
2212             ether_type != RTE_ETHER_TYPE_IPV6)
2213                 return rte_flow_error_set(error, EINVAL,
2214                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2215                                           "IPv6 cannot follow L2/VLAN layer "
2216                                           "whose ether type is not IPv6");
2217         if (mask && mask->hdr.proto == UINT8_MAX && spec)
2218                 next_proto = spec->hdr.proto;
2219         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
2220                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
2221                         return rte_flow_error_set(error, EINVAL,
2222                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2223                                                   item,
2224                                                   "multiple tunnel "
2225                                                   "not supported");
2226         }
2227         if (next_proto == IPPROTO_HOPOPTS  ||
2228             next_proto == IPPROTO_ROUTING  ||
2229             next_proto == IPPROTO_FRAGMENT ||
2230             next_proto == IPPROTO_ESP      ||
2231             next_proto == IPPROTO_AH       ||
2232             next_proto == IPPROTO_DSTOPTS)
2233                 return rte_flow_error_set(error, EINVAL,
2234                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2235                                           "IPv6 proto (next header) should "
2236                                           "not be set as extension header");
2237         if (item_flags & MLX5_FLOW_LAYER_IPIP)
2238                 return rte_flow_error_set(error, EINVAL,
2239                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2240                                           "wrong tunnel type - IPv4 specified "
2241                                           "but IPv6 item provided");
2242         if (item_flags & l3m)
2243                 return rte_flow_error_set(error, ENOTSUP,
2244                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2245                                           "multiple L3 layers not supported");
2246         else if (item_flags & l4m)
2247                 return rte_flow_error_set(error, EINVAL,
2248                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2249                                           "L3 cannot follow an L4 layer.");
2250         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
2251                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
2252                 return rte_flow_error_set(error, EINVAL,
2253                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2254                                           "L3 cannot follow an NVGRE layer.");
2255         if (!mask)
2256                 mask = &rte_flow_item_ipv6_mask;
2257         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2258                                         acc_mask ? (const uint8_t *)acc_mask
2259                                                  : (const uint8_t *)&nic_mask,
2260                                         sizeof(struct rte_flow_item_ipv6),
2261                                         MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2262         if (ret < 0)
2263                 return ret;
2264         return 0;
2265 }
2266
2267 /**
2268  * Validate UDP item.
2269  *
2270  * @param[in] item
2271  *   Item specification.
2272  * @param[in] item_flags
2273  *   Bit-fields that hold the items detected until now.
2274  * @param[in] target_protocol
2275  *   The next protocol in the previous item.
2278  * @param[out] error
2279  *   Pointer to error structure.
2280  *
2281  * @return
2282  *   0 on success, a negative errno value otherwise and rte_errno is set.
2283  */
2284 int
2285 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
2286                             uint64_t item_flags,
2287                             uint8_t target_protocol,
2288                             struct rte_flow_error *error)
2289 {
2290         const struct rte_flow_item_udp *mask = item->mask;
2291         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2292         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2293                                       MLX5_FLOW_LAYER_OUTER_L3;
2294         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2295                                       MLX5_FLOW_LAYER_OUTER_L4;
2296         int ret;
2297
2298         if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
2299                 return rte_flow_error_set(error, EINVAL,
2300                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2301                                           "protocol filtering not compatible"
2302                                           " with UDP layer");
2303         if (!(item_flags & l3m))
2304                 return rte_flow_error_set(error, EINVAL,
2305                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2306                                           "L3 is mandatory to filter on L4");
2307         if (item_flags & l4m)
2308                 return rte_flow_error_set(error, EINVAL,
2309                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2310                                           "multiple L4 layers not supported");
2311         if (!mask)
2312                 mask = &rte_flow_item_udp_mask;
2313         ret = mlx5_flow_item_acceptable
2314                 (item, (const uint8_t *)mask,
2315                  (const uint8_t *)&rte_flow_item_udp_mask,
2316                  sizeof(struct rte_flow_item_udp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2317                  error);
2318         if (ret < 0)
2319                 return ret;
2320         return 0;
2321 }
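
/*
 * Editor's sketch, not part of the original file: L4 items need an L3 layer
 * in item_flags, and target_protocol - the next-protocol byte of the
 * preceding IPv4/IPv6 item - must be UDP or wildcarded. Port 4789 is the
 * VXLAN port, used here as an arbitrary example.
 */
static int __rte_unused
example_validate_udp(struct rte_flow_error *error)
{
	const struct rte_flow_item_udp udp = {
		.hdr.dst_port = RTE_BE16(4789),
	};
	const struct rte_flow_item_udp udp_mask = {
		.hdr.dst_port = RTE_BE16(0xffff),	/* Match dst only. */
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_UDP,
		.spec = &udp,
		.mask = &udp_mask,
	};

	return mlx5_flow_validate_item_udp(&item,
					   MLX5_FLOW_LAYER_OUTER_L3_IPV4,
					   IPPROTO_UDP, error);
}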
2322
2323 /**
2324  * Validate TCP item.
2325  *
2326  * @param[in] item
2327  *   Item specification.
2328  * @param[in] item_flags
2329  *   Bit-fields that hold the items detected until now.
2330  * @param[in] target_protocol
2331  *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
2332  * @param[out] error
2333  *   Pointer to error structure.
2334  *
2335  * @return
2336  *   0 on success, a negative errno value otherwise and rte_errno is set.
2337  */
2338 int
2339 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
2340                             uint64_t item_flags,
2341                             uint8_t target_protocol,
2342                             const struct rte_flow_item_tcp *flow_mask,
2343                             struct rte_flow_error *error)
2344 {
2345         const struct rte_flow_item_tcp *mask = item->mask;
2346         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2347         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
2348                                       MLX5_FLOW_LAYER_OUTER_L3;
2349         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
2350                                       MLX5_FLOW_LAYER_OUTER_L4;
2351         int ret;
2352
2353         MLX5_ASSERT(flow_mask);
2354         if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
2355                 return rte_flow_error_set(error, EINVAL,
2356                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2357                                           "protocol filtering not compatible"
2358                                           " with TCP layer");
2359         if (!(item_flags & l3m))
2360                 return rte_flow_error_set(error, EINVAL,
2361                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2362                                           "L3 is mandatory to filter on L4");
2363         if (item_flags & l4m)
2364                 return rte_flow_error_set(error, EINVAL,
2365                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2366                                           "multiple L4 layers not supported");
2367         if (!mask)
2368                 mask = &rte_flow_item_tcp_mask;
2369         ret = mlx5_flow_item_acceptable
2370                 (item, (const uint8_t *)mask,
2371                  (const uint8_t *)flow_mask,
2372                  sizeof(struct rte_flow_item_tcp), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2373                  error);
2374         if (ret < 0)
2375                 return ret;
2376         return 0;
2377 }
2378
2379 /**
2380  * Validate VXLAN item.
2381  *
2382  * @param[in] item
2383  *   Item specification.
2384  * @param[in] item_flags
2385  *   Bit-fields that hold the items detected until now.
2388  * @param[out] error
2389  *   Pointer to error structure.
2390  *
2391  * @return
2392  *   0 on success, a negative errno value otherwise and rte_errno is set.
2393  */
2394 int
2395 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
2396                               uint64_t item_flags,
2397                               struct rte_flow_error *error)
2398 {
2399         const struct rte_flow_item_vxlan *spec = item->spec;
2400         const struct rte_flow_item_vxlan *mask = item->mask;
2401         int ret;
2402         union vni {
2403                 uint32_t vlan_id;
2404                 uint8_t vni[4];
2405         } id = { .vlan_id = 0, };
2406
2408         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2409                 return rte_flow_error_set(error, ENOTSUP,
2410                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2411                                           "multiple tunnel layers not"
2412                                           " supported");
2413         /*
2414          * Verify only UDPv4 is present as defined in
2415          * https://tools.ietf.org/html/rfc7348
2416          */
2417         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2418                 return rte_flow_error_set(error, EINVAL,
2419                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2420                                           "no outer UDP layer found");
2421         if (!mask)
2422                 mask = &rte_flow_item_vxlan_mask;
2423         ret = mlx5_flow_item_acceptable
2424                 (item, (const uint8_t *)mask,
2425                  (const uint8_t *)&rte_flow_item_vxlan_mask,
2426                  sizeof(struct rte_flow_item_vxlan),
2427                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2428         if (ret < 0)
2429                 return ret;
2430         if (spec) {
2431                 memcpy(&id.vni[1], spec->vni, 3);
2432                 /* Combine the VNI from the spec with the mask. */
                id.vni[1] &= mask->vni[0];
                id.vni[2] &= mask->vni[1];
                id.vni[3] &= mask->vni[2];
        }
2434         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2435                 return rte_flow_error_set(error, ENOTSUP,
2436                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2437                                           "VXLAN tunnel must be fully defined");
2438         return 0;
2439 }
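
/*
 * Editor's sketch, not part of the original file: VXLAN is accepted only as
 * the first tunnel in the pattern and only after an outer UDP layer, per
 * RFC 7348. VNI 42 is hypothetical.
 */
static int __rte_unused
example_validate_vxlan(struct rte_flow_error *error)
{
	const struct rte_flow_item_vxlan vxlan = {
		.vni = { 0, 0, 42 },
	};
	const struct rte_flow_item item = {
		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
		.spec = &vxlan,
		.mask = &rte_flow_item_vxlan_mask,
	};
	const uint64_t item_flags = MLX5_FLOW_LAYER_OUTER_L2 |
				    MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
				    MLX5_FLOW_LAYER_OUTER_L4_UDP;

	return mlx5_flow_validate_item_vxlan(&item, item_flags, error);
}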
2440
2441 /**
2442  * Validate VXLAN_GPE item.
2443  *
2444  * @param[in] item
2445  *   Item specification.
2446  * @param[in] item_flags
2447  *   Bit-fields that hold the items detected until now.
2448  * @param[in] dev
2449  *   Pointer to the Ethernet device structure.
2452  * @param[out] error
2453  *   Pointer to error structure.
2454  *
2455  * @return
2456  *   0 on success, a negative errno value otherwise and rte_errno is set.
2457  */
2458 int
2459 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
2460                                   uint64_t item_flags,
2461                                   struct rte_eth_dev *dev,
2462                                   struct rte_flow_error *error)
2463 {
2464         struct mlx5_priv *priv = dev->data->dev_private;
2465         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
2466         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
2467         int ret;
2468         union vni {
2469                 uint32_t vlan_id;
2470                 uint8_t vni[4];
2471         } id = { .vlan_id = 0, };
2472
2473         if (!priv->config.l3_vxlan_en)
2474                 return rte_flow_error_set(error, ENOTSUP,
2475                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2476                                           "L3 VXLAN is not enabled by device"
2477                                           " parameter and/or not configured in"
2478                                           " firmware");
2479         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2480                 return rte_flow_error_set(error, ENOTSUP,
2481                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2482                                           "multiple tunnel layers not"
2483                                           " supported");
2484         /*
2485          * Verify only UDPv4 is present as defined in the VXLAN-GPE
2486          * specification (draft-ietf-nvo3-vxlan-gpe).
2487          */
2488         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2489                 return rte_flow_error_set(error, EINVAL,
2490                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2491                                           "no outer UDP layer found");
2492         if (!mask)
2493                 mask = &rte_flow_item_vxlan_gpe_mask;
2494         ret = mlx5_flow_item_acceptable
2495                 (item, (const uint8_t *)mask,
2496                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
2497                  sizeof(struct rte_flow_item_vxlan_gpe),
2498                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2499         if (ret < 0)
2500                 return ret;
2501         if (spec) {
2502                 if (spec->protocol)
2503                         return rte_flow_error_set(error, ENOTSUP,
2504                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2505                                                   item,
2506                                                   "VXLAN-GPE protocol"
2507                                                   " not supported");
2508                 memcpy(&id.vni[1], spec->vni, 3);
2509                 /* Apply the mask so only the masked VNI bits are kept. */
                     id.vni[1] &= mask->vni[0];
                     id.vni[2] &= mask->vni[1];
                     id.vni[3] &= mask->vni[2];
2510         }
2511         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2512                 return rte_flow_error_set(error, ENOTSUP,
2513                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2514                                           "VXLAN-GPE tunnel must be fully"
2515                                           " defined");
2516         return 0;
2517 }

2518 /**
2519  * Validate GRE Key item.
2520  *
2521  * @param[in] item
2522  *   Item specification.
2523  * @param[in] item_flags
2524  *   Bit flags to mark detected items.
2525  * @param[in] gre_item
2526  *   Pointer to the GRE item that precedes this GRE key item.
2527  * @param[out] error
2528  *   Pointer to error structure.
2529  *
2530  * @return
2531  *   0 on success, a negative errno value otherwise and rte_errno is set.
2532  */
2533 int
2534 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2535                                 uint64_t item_flags,
2536                                 const struct rte_flow_item *gre_item,
2537                                 struct rte_flow_error *error)
2538 {
2539         const rte_be32_t *mask = item->mask;
2540         int ret = 0;
2541         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
2542         const struct rte_flow_item_gre *gre_spec;
2543         const struct rte_flow_item_gre *gre_mask;
2544
2545         if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
2546                 return rte_flow_error_set(error, ENOTSUP,
2547                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2548                                           "Multiple GRE keys not supported");
2549         if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2550                 return rte_flow_error_set(error, ENOTSUP,
2551                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2552                                           "No preceding GRE header");
2553         if (item_flags & MLX5_FLOW_LAYER_INNER)
2554                 return rte_flow_error_set(error, ENOTSUP,
2555                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2556                                           "GRE key cannot follow inner layers");
2557         gre_mask = gre_item->mask;
2558         if (!gre_mask)
2559                 gre_mask = &rte_flow_item_gre_mask;
2560         gre_spec = gre_item->spec;
2561         if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2562                          !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2563                 return rte_flow_error_set(error, EINVAL,
2564                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2565                                           "Key bit must be on");
2566
2567         if (!mask)
2568                 mask = &gre_key_default_mask;
2569         ret = mlx5_flow_item_acceptable
2570                 (item, (const uint8_t *)mask,
2571                  (const uint8_t *)&gre_key_default_mask,
2572                  sizeof(rte_be32_t), MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2573         return ret;
2574 }
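/*
 * Illustrative example, not part of the driver: a GRE key item passes the
 * checks above only when the preceding GRE item leaves the K bit (0x2000
 * in c_rsvd0_ver) set wherever its mask covers it. The key value is an
 * assumed example:
 *
 *   struct rte_flow_item_gre gre_spec = {
 *           .c_rsvd0_ver = RTE_BE16(0x2000),
 *   };
 *   rte_be32_t gre_key = RTE_BE32(0x1234);
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE, .spec = &gre_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_GRE_KEY, .spec = &gre_key },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */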
2575
2576 /**
2577  * Validate GRE item.
2578  *
2579  * @param[in] item
2580  *   Item specification.
2581  * @param[in] item_flags
2582  *   Bit flags to mark detected items.
2583  * @param[in] target_protocol
2584  *   The next protocol in the previous item.
2585  * @param[out] error
2586  *   Pointer to error structure.
2587  *
2588  * @return
2589  *   0 on success, a negative errno value otherwise and rte_errno is set.
2590  */
2591 int
2592 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2593                             uint64_t item_flags,
2594                             uint8_t target_protocol,
2595                             struct rte_flow_error *error)
2596 {
2597         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2598         const struct rte_flow_item_gre *mask = item->mask;
2599         int ret;
2600         const struct rte_flow_item_gre nic_mask = {
2601                 .c_rsvd0_ver = RTE_BE16(0xB000),
2602                 .protocol = RTE_BE16(UINT16_MAX),
2603         };
2604
2605         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2606                 return rte_flow_error_set(error, EINVAL,
2607                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2608                                           "protocol filtering not compatible"
2609                                           " with this GRE layer");
2610         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2611                 return rte_flow_error_set(error, ENOTSUP,
2612                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2613                                           "multiple tunnel layers not"
2614                                           " supported");
2615         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2616                 return rte_flow_error_set(error, ENOTSUP,
2617                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2618                                           "L3 Layer is missing");
2619         if (!mask)
2620                 mask = &rte_flow_item_gre_mask;
2621         ret = mlx5_flow_item_acceptable
2622                 (item, (const uint8_t *)mask,
2623                  (const uint8_t *)&nic_mask,
2624                  sizeof(struct rte_flow_item_gre), MLX5_ITEM_RANGE_NOT_ACCEPTED,
2625                  error);
2626         if (ret < 0)
2627                 return ret;
2628 #ifndef HAVE_MLX5DV_DR
2629 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2630         if (spec && (spec->protocol & mask->protocol))
2631                 return rte_flow_error_set(error, ENOTSUP,
2632                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2633                                           "without MPLS support the"
2634                                           " specification cannot be used for"
2635                                           " filtering");
2636 #endif
2637 #endif
2638         return 0;
2639 }
2640
2641 /**
2642  * Validate Geneve item.
2643  *
2644  * @param[in] item
2645  *   Item specification.
2646  * @param[in] item_flags
2647  *   Bit-fields that hold the items detected until now.
2648  * @param[in] dev
2649  *   Pointer to the Ethernet device structure.
2650  * @param[out] error
2651  *   Pointer to error structure.
2652  *
2653  * @return
2654  *   0 on success, a negative errno value otherwise and rte_errno is set.
2655  */
2657 int
2658 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2659                                uint64_t item_flags,
2660                                struct rte_eth_dev *dev,
2661                                struct rte_flow_error *error)
2662 {
2663         struct mlx5_priv *priv = dev->data->dev_private;
2664         const struct rte_flow_item_geneve *spec = item->spec;
2665         const struct rte_flow_item_geneve *mask = item->mask;
2666         int ret;
2667         uint16_t gbhdr;
2668         uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2669                           MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2670         const struct rte_flow_item_geneve nic_mask = {
2671                 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2672                 .vni = "\xff\xff\xff",
2673                 .protocol = RTE_BE16(UINT16_MAX),
2674         };
2675
2676         if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2677                 return rte_flow_error_set(error, ENOTSUP,
2678                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2679                                           "L3 Geneve is not enabled by device"
2680                                           " parameter and/or not configured in"
2681                                           " firmware");
2682         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2683                 return rte_flow_error_set(error, ENOTSUP,
2684                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2685                                           "multiple tunnel layers not"
2686                                           " supported");
2687         /*
2688          * Verify only UDPv4 is present as defined in
2689          * https://tools.ietf.org/html/rfc8926
2690          */
2691         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2692                 return rte_flow_error_set(error, EINVAL,
2693                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2694                                           "no outer UDP layer found");
2695         if (!mask)
2696                 mask = &rte_flow_item_geneve_mask;
2697         ret = mlx5_flow_item_acceptable
2698                                   (item, (const uint8_t *)mask,
2699                                    (const uint8_t *)&nic_mask,
2700                                    sizeof(struct rte_flow_item_geneve),
2701                                    MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2702         if (ret < 0)
2703                 return ret;
2704         if (spec) {
2705                 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2706                 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2707                      MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2708                      MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2709                         return rte_flow_error_set(error, ENOTSUP,
2710                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2711                                                   item,
2712                                                   "Geneve protocol unsupported"
2713                                                   " fields are being used");
2714                 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2715                         return rte_flow_error_set
2716                                         (error, ENOTSUP,
2717                                          RTE_FLOW_ERROR_TYPE_ITEM,
2718                                          item,
2719                                          "Unsupported Geneve options length");
2720         }
2721         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2722                 return rte_flow_error_set
2723                                     (error, ENOTSUP,
2724                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
2725                                      "Geneve tunnel must be fully defined");
2726         return 0;
2727 }
2728
2729 /**
2730  * Validate MPLS item.
2731  *
2732  * @param[in] dev
2733  *   Pointer to the rte_eth_dev structure.
2734  * @param[in] item
2735  *   Item specification.
2736  * @param[in] item_flags
2737  *   Bit-fields that hold the items detected until now.
2738  * @param[in] prev_layer
2739  *   The protocol layer indicated in previous item.
2740  * @param[out] error
2741  *   Pointer to error structure.
2742  *
2743  * @return
2744  *   0 on success, a negative errno value otherwise and rte_errno is set.
2745  */
2746 int
2747 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2748                              const struct rte_flow_item *item __rte_unused,
2749                              uint64_t item_flags __rte_unused,
2750                              uint64_t prev_layer __rte_unused,
2751                              struct rte_flow_error *error)
2752 {
2753 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2754         const struct rte_flow_item_mpls *mask = item->mask;
2755         struct mlx5_priv *priv = dev->data->dev_private;
2756         int ret;
2757
2758         if (!priv->config.mpls_en)
2759                 return rte_flow_error_set(error, ENOTSUP,
2760                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2761                                           "MPLS not supported or"
2762                                           " disabled in firmware"
2763                                           " configuration.");
2764         /* MPLS over IP, UDP, or GRE is allowed. */
2765         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2766                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
2767                             MLX5_FLOW_LAYER_GRE)))
2768                 return rte_flow_error_set(error, EINVAL,
2769                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2770                                           "protocol filtering not compatible"
2771                                           " with MPLS layer");
2772         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2773         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2774             !(item_flags & MLX5_FLOW_LAYER_GRE))
2775                 return rte_flow_error_set(error, ENOTSUP,
2776                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2777                                           "multiple tunnel layers not"
2778                                           " supported");
2779         if (!mask)
2780                 mask = &rte_flow_item_mpls_mask;
2781         ret = mlx5_flow_item_acceptable
2782                 (item, (const uint8_t *)mask,
2783                  (const uint8_t *)&rte_flow_item_mpls_mask,
2784                  sizeof(struct rte_flow_item_mpls),
2785                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2786         if (ret < 0)
2787                 return ret;
2788         return 0;
2789 #else
2790         return rte_flow_error_set(error, ENOTSUP,
2791                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
2792                                   "MPLS is not supported by Verbs, please"
2793                                   " update rdma-core.");
2794 #endif
2795 }
2796
2797 /**
2798  * Validate NVGRE item.
2799  *
2800  * @param[in] item
2801  *   Item specification.
2802  * @param[in] item_flags
2803  *   Bit flags to mark detected items.
2804  * @param[in] target_protocol
2805  *   The next protocol in the previous item.
2806  * @param[out] error
2807  *   Pointer to error structure.
2808  *
2809  * @return
2810  *   0 on success, a negative errno value otherwise and rte_errno is set.
2811  */
2812 int
2813 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2814                               uint64_t item_flags,
2815                               uint8_t target_protocol,
2816                               struct rte_flow_error *error)
2817 {
2818         const struct rte_flow_item_nvgre *mask = item->mask;
2819         int ret;
2820
2821         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2822                 return rte_flow_error_set(error, EINVAL,
2823                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2824                                           "protocol filtering not compatible"
2825                                           " with this GRE layer");
2826         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2827                 return rte_flow_error_set(error, ENOTSUP,
2828                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2829                                           "multiple tunnel layers not"
2830                                           " supported");
2831         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2832                 return rte_flow_error_set(error, ENOTSUP,
2833                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2834                                           "L3 Layer is missing");
2835         if (!mask)
2836                 mask = &rte_flow_item_nvgre_mask;
2837         ret = mlx5_flow_item_acceptable
2838                 (item, (const uint8_t *)mask,
2839                  (const uint8_t *)&rte_flow_item_nvgre_mask,
2840                  sizeof(struct rte_flow_item_nvgre),
2841                  MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2842         if (ret < 0)
2843                 return ret;
2844         return 0;
2845 }
2846
2847 /**
2848  * Validate eCPRI item.
2849  *
2850  * @param[in] item
2851  *   Item specification.
2852  * @param[in] item_flags
2853  *   Bit-fields that hold the items detected until now.
2854  * @param[in] last_item
2855  *   Previous validated item in the pattern items.
2856  * @param[in] ether_type
2857  *   Type in the ethernet layer header (including dot1q).
2858  * @param[in] acc_mask
2859  *   Acceptable mask. If NULL, the default internal mask
2860  *   will be used to check whether item fields are supported.
2861  * @param[out] error
2862  *   Pointer to error structure.
2863  *
2864  * @return
2865  *   0 on success, a negative errno value otherwise and rte_errno is set.
2866  */
2867 int
2868 mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2869                               uint64_t item_flags,
2870                               uint64_t last_item,
2871                               uint16_t ether_type,
2872                               const struct rte_flow_item_ecpri *acc_mask,
2873                               struct rte_flow_error *error)
2874 {
2875         const struct rte_flow_item_ecpri *mask = item->mask;
2876         const struct rte_flow_item_ecpri nic_mask = {
2877                 .hdr = {
2878                         .common = {
2879                                 .u32 =
2880                                 RTE_BE32(((const struct rte_ecpri_common_hdr) {
2881                                         .type = 0xFF,
2882                                         }).u32),
2883                         },
2884                         .dummy[0] = 0xFFFFFFFF,
2885                 },
2886         };
2887         const uint64_t outer_l2_vlan = (MLX5_FLOW_LAYER_OUTER_L2 |
2888                                         MLX5_FLOW_LAYER_OUTER_VLAN);
2889         struct rte_flow_item_ecpri mask_lo;
2890
2891         if ((last_item & outer_l2_vlan) && ether_type &&
2892             ether_type != RTE_ETHER_TYPE_ECPRI)
2893                 return rte_flow_error_set(error, EINVAL,
2894                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2895                                           "eCPRI cannot follow an L2/VLAN layer "
2896                                           "whose ether type is not 0xAEFE.");
2897         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2898                 return rte_flow_error_set(error, EINVAL,
2899                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2900                                           "eCPRI with tunnel is not supported "
2901                                           "right now.");
2902         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3)
2903                 return rte_flow_error_set(error, ENOTSUP,
2904                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2905                                           "multiple L3 layers not supported");
2906         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP)
2907                 return rte_flow_error_set(error, EINVAL,
2908                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2909                                           "eCPRI cannot follow a TCP layer.");
2910         /* Per the specification, eCPRI may also be carried over UDP. */
2911         else if (item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP)
2912                 return rte_flow_error_set(error, EINVAL,
2913                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2914                                           "eCPRI over UDP layer is not "
2915                                           "supported yet.");
2916         /* Mask for type field in common header could be zero. */
2917         if (!mask)
2918                 mask = &rte_flow_item_ecpri_mask;
2919         mask_lo.hdr.common.u32 = rte_be_to_cpu_32(mask->hdr.common.u32);
2920         /* Input mask is in big-endian format. */
2921         if (mask_lo.hdr.common.type != 0 && mask_lo.hdr.common.type != 0xff)
2922                 return rte_flow_error_set(error, EINVAL,
2923                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2924                                           "partial mask is not supported "
2925                                           "for protocol");
2926         else if (mask_lo.hdr.common.type == 0 && mask->hdr.dummy[0] != 0)
2927                 return rte_flow_error_set(error, EINVAL,
2928                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
2929                                           "message header mask must be after "
2930                                           "a type mask");
2931         return mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
2932                                          acc_mask ? (const uint8_t *)acc_mask
2933                                                   : (const uint8_t *)&nic_mask,
2934                                          sizeof(struct rte_flow_item_ecpri),
2935                                          MLX5_ITEM_RANGE_NOT_ACCEPTED, error);
2936 }
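/*
 * Illustrative example, not part of the driver: eCPRI is matched directly
 * over Ethernet (ether type 0xAEFE); per the checks above, the type mask
 * must be either empty or full (0xFF):
 *
 *   struct rte_flow_item_ecpri ecpri_spec = {
 *           .hdr.common.type = RTE_ECPRI_MSG_TYPE_IQ_DATA,
 *   };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_ECPRI, .spec = &ecpri_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 */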
2937
2938 /**
2939  * Release resources related to the QUEUE/RSS split actions.
2940  *
2941  * @param dev
2942  *   Pointer to Ethernet device.
2943  * @param flow
2944  *   Flow to release split-flow IDs from.
2945  */
2946 static void
2947 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2948                              struct rte_flow *flow)
2949 {
2950         struct mlx5_priv *priv = dev->data->dev_private;
2951         uint32_t handle_idx;
2952         struct mlx5_flow_handle *dev_handle;
2953
2954         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2955                        handle_idx, dev_handle, next)
2956                 if (dev_handle->split_flow_id)
2957                         mlx5_ipool_free(priv->sh->ipool
2958                                         [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
2959                                         dev_handle->split_flow_id);
2960 }
2961
2962 static int
2963 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2964                    const struct rte_flow_attr *attr __rte_unused,
2965                    const struct rte_flow_item items[] __rte_unused,
2966                    const struct rte_flow_action actions[] __rte_unused,
2967                    bool external __rte_unused,
2968                    int hairpin __rte_unused,
2969                    struct rte_flow_error *error)
2970 {
2971         return rte_flow_error_set(error, ENOTSUP,
2972                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2973 }
2974
2975 static struct mlx5_flow *
2976 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2977                   const struct rte_flow_attr *attr __rte_unused,
2978                   const struct rte_flow_item items[] __rte_unused,
2979                   const struct rte_flow_action actions[] __rte_unused,
2980                   struct rte_flow_error *error)
2981 {
2982         rte_flow_error_set(error, ENOTSUP,
2983                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2984         return NULL;
2985 }
2986
2987 static int
2988 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2989                     struct mlx5_flow *dev_flow __rte_unused,
2990                     const struct rte_flow_attr *attr __rte_unused,
2991                     const struct rte_flow_item items[] __rte_unused,
2992                     const struct rte_flow_action actions[] __rte_unused,
2993                     struct rte_flow_error *error)
2994 {
2995         return rte_flow_error_set(error, ENOTSUP,
2996                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2997 }
2998
2999 static int
3000 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
3001                 struct rte_flow *flow __rte_unused,
3002                 struct rte_flow_error *error)
3003 {
3004         return rte_flow_error_set(error, ENOTSUP,
3005                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3006 }
3007
3008 static void
3009 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
3010                  struct rte_flow *flow __rte_unused)
3011 {
3012 }
3013
3014 static void
3015 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
3016                   struct rte_flow *flow __rte_unused)
3017 {
3018 }
3019
3020 static int
3021 flow_null_query(struct rte_eth_dev *dev __rte_unused,
3022                 struct rte_flow *flow __rte_unused,
3023                 const struct rte_flow_action *actions __rte_unused,
3024                 void *data __rte_unused,
3025                 struct rte_flow_error *error)
3026 {
3027         return rte_flow_error_set(error, ENOTSUP,
3028                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
3029 }
3030
3031 static int
3032 flow_null_sync_domain(struct rte_eth_dev *dev __rte_unused,
3033                       uint32_t domains __rte_unused,
3034                       uint32_t flags __rte_unused)
3035 {
3036         return 0;
3037 }
3038
3039 /* Void driver to protect against NULL pointer dereference. */
3040 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
3041         .validate = flow_null_validate,
3042         .prepare = flow_null_prepare,
3043         .translate = flow_null_translate,
3044         .apply = flow_null_apply,
3045         .remove = flow_null_remove,
3046         .destroy = flow_null_destroy,
3047         .query = flow_null_query,
3048         .sync_domain = flow_null_sync_domain,
3049 };
3050
3051 /**
3052  * Select flow driver type according to flow attributes and device
3053  * configuration.
3054  *
3055  * @param[in] dev
3056  *   Pointer to the dev structure.
3057  * @param[in] attr
3058  *   Pointer to the flow attributes.
3059  *
3060  * @return
3061  *   flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
3062  */
3063 static enum mlx5_flow_drv_type
3064 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
3065 {
3066         struct mlx5_priv *priv = dev->data->dev_private;
3067         /* The OS layer may determine a specific flow type (DV, VERBS) first. */
3068         enum mlx5_flow_drv_type type = mlx5_flow_os_get_type();
3069
3070         if (type != MLX5_FLOW_TYPE_MAX)
3071                 return type;
3072         /* If no OS specific type - continue with DV/VERBS selection */
3073         if (attr->transfer && priv->config.dv_esw_en)
3074                 type = MLX5_FLOW_TYPE_DV;
3075         if (!attr->transfer)
3076                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
3077                                                  MLX5_FLOW_TYPE_VERBS;
3078         return type;
3079 }
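/*
 * Selection summary for the logic above (a sketch, assuming
 * mlx5_flow_os_get_type() returns no OS specific override):
 *
 *   attr->transfer && dv_esw_en    -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && dv_flow_en  -> MLX5_FLOW_TYPE_DV
 *   !attr->transfer && !dv_flow_en -> MLX5_FLOW_TYPE_VERBS
 *   attr->transfer && !dv_esw_en   -> MLX5_FLOW_TYPE_MAX (unsupported)
 */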
3080
3081 #define flow_get_drv_ops(type) flow_drv_ops[type]
3082
3083 /**
3084  * Flow driver validation API. This abstracts calling driver specific functions.
3085  * The type of flow driver is determined according to flow attributes.
3086  *
3087  * @param[in] dev
3088  *   Pointer to the dev structure.
3089  * @param[in] attr
3090  *   Pointer to the flow attributes.
3091  * @param[in] items
3092  *   Pointer to the list of items.
3093  * @param[in] actions
3094  *   Pointer to the list of actions.
3095  * @param[in] external
3096  *   This flow rule is created by a request external to the PMD.
3097  * @param[in] hairpin
3098  *   Number of hairpin TX actions, 0 means classic flow.
3099  * @param[out] error
3100  *   Pointer to the error structure.
3101  *
3102  * @return
3103  *   0 on success, a negative errno value otherwise and rte_errno is set.
3104  */
3105 static inline int
3106 flow_drv_validate(struct rte_eth_dev *dev,
3107                   const struct rte_flow_attr *attr,
3108                   const struct rte_flow_item items[],
3109                   const struct rte_flow_action actions[],
3110                   bool external, int hairpin, struct rte_flow_error *error)
3111 {
3112         const struct mlx5_flow_driver_ops *fops;
3113         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
3114
3115         fops = flow_get_drv_ops(type);
3116         return fops->validate(dev, attr, items, actions, external,
3117                               hairpin, error);
3118 }
3119
3120 /**
3121  * Flow driver preparation API. This abstracts calling driver specific
3122  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3123  * calculates the size of memory required for device flow, allocates the memory,
3124  * initializes the device flow and returns the pointer.
3125  *
3126  * @note
3127  *   This function initializes the device flow structure, such as dv or verbs
3128  *   in struct mlx5_flow. However, it is the caller's responsibility to
3129  *   initialize the rest, e.g. adding the returned device flow to the flow's
3130  *   device flow list and setting a backward reference to the flow; that should
3131  *   be done outside of this function. The layers field is not filled either.
3132  *
3133  * @param[in] dev
3134  *   Pointer to the dev structure.
3135  * @param[in] attr
3136  *   Pointer to the flow attributes.
3137  * @param[in] items
3138  *   Pointer to the list of items.
3139  * @param[in] actions
3140  *   Pointer to the list of actions.
3141  * @param[in] flow_idx
3142  *   Memory pool index of the flow.
3143  * @param[out] error
3144  *   Pointer to the error structure.
3145  *
3146  * @return
3147  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
3148  */
3149 static inline struct mlx5_flow *
3150 flow_drv_prepare(struct rte_eth_dev *dev,
3151                  const struct rte_flow *flow,
3152                  const struct rte_flow_attr *attr,
3153                  const struct rte_flow_item items[],
3154                  const struct rte_flow_action actions[],
3155                  uint32_t flow_idx,
3156                  struct rte_flow_error *error)
3157 {
3158         const struct mlx5_flow_driver_ops *fops;
3159         enum mlx5_flow_drv_type type = flow->drv_type;
3160         struct mlx5_flow *mlx5_flow = NULL;
3161
3162         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3163         fops = flow_get_drv_ops(type);
3164         mlx5_flow = fops->prepare(dev, attr, items, actions, error);
3165         if (mlx5_flow)
3166                 mlx5_flow->flow_idx = flow_idx;
3167         return mlx5_flow;
3168 }
3169
3170 /**
3171  * Flow driver translation API. This abstracts calling driver specific
3172  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
3173  * translates a generic flow into a driver flow. flow_drv_prepare() must
3174  * precede.
3175  *
3176  * @note
3177  *   dev_flow->layers could be filled as a result of parsing during translation
3178  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
3179  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
3180  *   flow->actions could be overwritten even though all the expanded dev_flows
3181  *   have the same actions.
3182  *
3183  * @param[in] dev
3184  *   Pointer to the rte dev structure.
3185  * @param[in, out] dev_flow
3186  *   Pointer to the mlx5 flow.
3187  * @param[in] attr
3188  *   Pointer to the flow attributes.
3189  * @param[in] items
3190  *   Pointer to the list of items.
3191  * @param[in] actions
3192  *   Pointer to the list of actions.
3193  * @param[out] error
3194  *   Pointer to the error structure.
3195  *
3196  * @return
3197  *   0 on success, a negative errno value otherwise and rte_errno is set.
3198  */
3199 static inline int
3200 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
3201                    const struct rte_flow_attr *attr,
3202                    const struct rte_flow_item items[],
3203                    const struct rte_flow_action actions[],
3204                    struct rte_flow_error *error)
3205 {
3206         const struct mlx5_flow_driver_ops *fops;
3207         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
3208
3209         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3210         fops = flow_get_drv_ops(type);
3211         return fops->translate(dev, dev_flow, attr, items, actions, error);
3212 }
3213
3214 /**
3215  * Flow driver apply API. This abstracts calling driver specific functions.
3216  * Parent flow (rte_flow) should have driver type (drv_type). It applies
3217  * translated driver flows on to device. flow_drv_translate() must precede.
3218  *
3219  * @param[in] dev
3220  *   Pointer to Ethernet device structure.
3221  * @param[in, out] flow
3222  *   Pointer to flow structure.
3223  * @param[out] error
3224  *   Pointer to error structure.
3225  *
3226  * @return
3227  *   0 on success, a negative errno value otherwise and rte_errno is set.
3228  */
3229 static inline int
3230 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3231                struct rte_flow_error *error)
3232 {
3233         const struct mlx5_flow_driver_ops *fops;
3234         enum mlx5_flow_drv_type type = flow->drv_type;
3235
3236         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3237         fops = flow_get_drv_ops(type);
3238         return fops->apply(dev, flow, error);
3239 }
3240
3241 /**
3242  * Flow driver destroy API. This abstracts calling driver specific functions.
3243  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
3244  * on device and releases resources of the flow.
3245  *
3246  * @param[in] dev
3247  *   Pointer to Ethernet device.
3248  * @param[in, out] flow
3249  *   Pointer to flow structure.
3250  */
3251 static inline void
3252 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3253 {
3254         const struct mlx5_flow_driver_ops *fops;
3255         enum mlx5_flow_drv_type type = flow->drv_type;
3256
3257         flow_mreg_split_qrss_release(dev, flow);
3258         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
3259         fops = flow_get_drv_ops(type);
3260         fops->destroy(dev, flow);
3261 }
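/*
 * Typical sequence over the wrappers above during flow creation (a
 * sketch): flow_drv_validate() -> flow_drv_prepare() ->
 * flow_drv_translate() -> flow_drv_apply(); flow_drv_destroy() releases
 * everything on failure or at flow removal.
 */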
3262
3263 /**
3264  * Get RSS action from the action list.
3265  *
3266  * @param[in] actions
3267  *   Pointer to the list of actions.
3268  *
3269  * @return
3270  *   Pointer to the RSS action if it exists, NULL otherwise.
3271  */
3272 static const struct rte_flow_action_rss*
3273 flow_get_rss_action(const struct rte_flow_action actions[])
3274 {
3275         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3276                 switch (actions->type) {
3277                 case RTE_FLOW_ACTION_TYPE_RSS:
3278                         return (const struct rte_flow_action_rss *)
3279                                actions->conf;
3280                 default:
3281                         break;
3282                 }
3283         }
3284         return NULL;
3285 }
3286
3287 /* Maps a shared action to its translated non-shared counterpart. */
3288 struct mlx5_translated_shared_action {
3289         struct rte_flow_shared_action *action; /**< Shared action */
3290         int index; /**< Index in related array of rte_flow_action */
3291 };
3292
3293 /**
3294  * Translates actions of type RTE_FLOW_ACTION_TYPE_SHARED to their related
3295  * non-shared actions if translation is possible.
3296  * This functionality is used to run the same execution path for both shared
3297  * and non-shared actions on flow create. All necessary preparations for
3298  * shared action handling should be performed on the *shared* actions list
3299  * returned from this call.
3300  *
3301  * @param[in] actions
3302  *   List of actions to translate.
3303  * @param[out] shared
3304  *   List to store translated shared actions.
3305  * @param[in, out] shared_n
3306  *   Size of *shared* array. On return should be updated with number of shared
3307  *   actions retrieved from the *actions* list.
3308  * @param[out] translated_actions
3309  *   List of actions where all shared actions were translated to non shared
3310  *   if possible. NULL if no translation took place.
3311  * @param[out] error
3312  *   Pointer to the error structure.
3313  *
3314  * @return
3315  *   0 on success, a negative errno value otherwise and rte_errno is set.
3316  */
3317 static int
3318 flow_shared_actions_translate(const struct rte_flow_action actions[],
3319         struct mlx5_translated_shared_action *shared,
3320         int *shared_n,
3321         struct rte_flow_action **translated_actions,
3322         struct rte_flow_error *error)
3323 {
3324         struct rte_flow_action *translated = NULL;
3325         size_t actions_size;
3326         int n;
3327         int copied_n = 0;
3328         struct mlx5_translated_shared_action *shared_end = NULL;
3329
3330         for (n = 0; actions[n].type != RTE_FLOW_ACTION_TYPE_END; n++) {
3331                 if (actions[n].type != RTE_FLOW_ACTION_TYPE_SHARED)
3332                         continue;
3333                 if (copied_n == *shared_n) {
3334                         return rte_flow_error_set
3335                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_NUM,
3336                                  NULL, "too many shared actions");
3337                 }
3338                 rte_memcpy(&shared[copied_n].action, &actions[n].conf,
3339                            sizeof(actions[n].conf));
3340                 shared[copied_n].index = n;
3341                 copied_n++;
3342         }
3343         n++; /* Include the END action in the count. */
3344         *shared_n = copied_n;
3345         if (!copied_n)
3346                 return 0;
3347         actions_size = sizeof(struct rte_flow_action) * n;
3348         translated = mlx5_malloc(MLX5_MEM_ZERO, actions_size, 0, SOCKET_ID_ANY);
3349         if (!translated) {
3350                 rte_errno = ENOMEM;
3351                 return -ENOMEM;
3352         }
3353         memcpy(translated, actions, actions_size);
3354         for (shared_end = shared + copied_n; shared < shared_end; shared++) {
3355                 const struct rte_flow_shared_action *shared_action;
3356
3357                 shared_action = shared->action;
3358                 switch (shared_action->type) {
3359                 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3360                         translated[shared->index].type =
3361                                 RTE_FLOW_ACTION_TYPE_RSS;
3362                         translated[shared->index].conf =
3363                                 &shared_action->rss.origin;
3364                         break;
3365                 default:
3366                         mlx5_free(translated);
3367                         return rte_flow_error_set
3368                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION,
3369                                  NULL, "invalid shared action type");
3370                 }
3371         }
3372         *translated_actions = translated;
3373         return 0;
3374 }
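/*
 * Usage sketch, assuming the caller mirrors the flow creation path (the
 * array size is an assumed example value):
 *
 *   struct mlx5_translated_shared_action shared[8];
 *   int shared_n = 8;
 *   struct rte_flow_action *translated = NULL;
 *   struct rte_flow_error err;
 *
 *   if (flow_shared_actions_translate(actions, shared, &shared_n,
 *                                     &translated, &err))
 *           return -rte_errno;
 *   const struct rte_flow_action *use =
 *           translated ? translated : actions;
 *   ... validate/create the flow with "use" ...
 *   mlx5_free(translated);
 */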
3375
3376 /**
3377  * Get Shared RSS action from the action list.
3378  *
3379  * @param[in] shared
3380  *   Pointer to the list of actions.
3381  * @param[in] shared_n
3382  *   Actions list length.
3383  *
3384  * @return
3385  *   Pointer to the MLX5 RSS action if it exists, otherwise NULL.
3386  */
3387 static struct mlx5_shared_action_rss *
3388 flow_get_shared_rss_action(struct mlx5_translated_shared_action *shared,
3389                            int shared_n)
3390 {
3391         struct mlx5_translated_shared_action *shared_end;
3392
3393         for (shared_end = shared + shared_n; shared < shared_end; shared++) {
3394                 struct rte_flow_shared_action *shared_action;
3395
3396                 shared_action = shared->action;
3397                 switch (shared_action->type) {
3398                 case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
3399                         __atomic_add_fetch(&shared_action->refcnt, 1,
3400                                            __ATOMIC_RELAXED);
3401                         return &shared_action->rss;
3402                 default:
3403                         break;
3404                 }
3405         }
3406         return NULL;
3407 }
3408
3409 struct rte_flow_shared_action *
3410 mlx5_flow_get_shared_rss(struct rte_flow *flow)
3411 {
3412         if (flow->shared_rss)
3413                 return container_of(flow->shared_rss,
3414                                     struct rte_flow_shared_action, rss);
3415         else
3416                 return NULL;
3417 }
3418
3419 static unsigned int
3420 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
3421 {
3422         const struct rte_flow_item *item;
3423         unsigned int has_vlan = 0;
3424
3425         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
3426                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
3427                         has_vlan = 1;
3428                         break;
3429                 }
3430         }
3431         if (has_vlan)
3432                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
3433                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
3434         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
3435                                MLX5_EXPANSION_ROOT_OUTER;
3436 }
3437
3438 /**
3439  *  Get layer flags from the prefix flow.
3440  *
3441  *  Some flows may be split into several subflows: the prefix subflow gets the
3442  *  match items and the suffix subflow gets the actions.
3443  *  Some actions need the user-defined match item flags to get the details for
3444  *  the action.
3445  *  This function helps the suffix flow to get the item layer flags from the
3446  *  prefix subflow.
3447  *
3448  * @param[in] dev_flow
3449  *   Pointer to the created prefix subflow.
3450  *
3451  * @return
3452  *   The layers obtained from the prefix subflow.
3453  */
3454 static inline uint64_t
3455 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
3456 {
3457         uint64_t layers = 0;
3458
3459         /*
3460          * The layer bits could be cached in a local variable, but the
3461          * compiler usually optimizes such accesses well enough on its own.
3462          * If there is no decap action, use the layers directly.
3463          */
3464         if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
3465                 return dev_flow->handle->layers;
3466         /* Convert L3 layers with decap action. */
3467         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
3468                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3469         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
3470                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3471         /* Convert L4 layers with decap action.  */
3472         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
3473                 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
3474         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
3475                 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
3476         return layers;
3477 }
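/*
 * Worked example (illustrative): a prefix subflow matching
 * ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / TCP with a decap action carries
 * MLX5_FLOW_LAYER_INNER_L3_IPV4 and MLX5_FLOW_LAYER_INNER_L4_TCP in
 * handle->layers; the conversion above hands the suffix subflow
 * MLX5_FLOW_LAYER_OUTER_L3_IPV4 and MLX5_FLOW_LAYER_OUTER_L4_TCP,
 * i.e. the packet as it appears after the decap.
 */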
3478
3479 /**
3480  * Get metadata split action information.
3481  *
3482  * @param[in] actions
3483  *   Pointer to the list of actions.
3484  * @param[out] qrss
3485  *   Pointer used to return the QUEUE/RSS action pointer; left unchanged
3486  *   if no QUEUE/RSS action is found.
3489  * @param[out] encap_idx
3490  *   Pointer to the index of the encap action if exists, otherwise the last
3491  *   action index.
3492  *
3493  * @return
3494  *   Total number of actions.
3495  */
3496 static int
3497 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
3498                                        const struct rte_flow_action **qrss,
3499                                        int *encap_idx)
3500 {
3501         const struct rte_flow_action_raw_encap *raw_encap;
3502         int actions_n = 0;
3503         int raw_decap_idx = -1;
3504
3505         *encap_idx = -1;
3506         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3507                 switch (actions->type) {
3508                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3509                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3510                         *encap_idx = actions_n;
3511                         break;
3512                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3513                         raw_decap_idx = actions_n;
3514                         break;
3515                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3516                         raw_encap = actions->conf;
3517                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3518                                 *encap_idx = raw_decap_idx != -1 ?
3519                                                       raw_decap_idx : actions_n;
3520                         break;
3521                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3522                 case RTE_FLOW_ACTION_TYPE_RSS:
3523                         *qrss = actions;
3524                         break;
3525                 default:
3526                         break;
3527                 }
3528                 actions_n++;
3529         }
3530         if (*encap_idx == -1)
3531                 *encap_idx = actions_n;
3532         /* Count RTE_FLOW_ACTION_TYPE_END. */
3533         return actions_n + 1;
3534 }
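/*
 * Worked example (illustrative): for the action list
 *   RAW_DECAP / RAW_ENCAP (size > MLX5_ENCAPSULATION_DECISION_SIZE) /
 *   QUEUE / END
 * the loop above sets *encap_idx to 0 (the RAW_DECAP position, since the
 * decap opens the encapsulation sequence), points *qrss at the QUEUE
 * action and returns 4: three actions plus the END terminator.
 */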
3535
3536 /**
3537  * Check meter action from the action list.
3538  *
3539  * @param[in] actions
3540  *   Pointer to the list of actions.
3541  * @param[out] mtr
3542  *   Pointer to the meter exist flag.
3543  *
3544  * @return
3545  *   Total number of actions.
3546  */
3547 static int
3548 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
3549 {
3550         int actions_n = 0;
3551
3552         MLX5_ASSERT(mtr);
3553         *mtr = 0;
3554         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3555                 switch (actions->type) {
3556                 case RTE_FLOW_ACTION_TYPE_METER:
3557                         *mtr = 1;
3558                         break;
3559                 default:
3560                         break;
3561                 }
3562                 actions_n++;
3563         }
3564         /* Count RTE_FLOW_ACTION_TYPE_END. */
3565         return actions_n + 1;
3566 }
3567
3568 /**
3569  * Check if the flow should be split due to hairpin.
3570  * The reason for the split is that in current HW we can't
3571  * support encap and push-vlan on Rx, so if a flow contains
3572  * these actions we move it to Tx.
3573  *
3574  * @param dev
3575  *   Pointer to Ethernet device.
3576  * @param[in] attr
3577  *   Flow rule attributes.
3578  * @param[in] actions
3579  *   Associated actions (list terminated by the END action).
3580  *
3581  * @return
3582  *   > 0 the number of actions and the flow should be split,
3583  *   0 when no split is required.
3584  */
3585 static int
3586 flow_check_hairpin_split(struct rte_eth_dev *dev,
3587                          const struct rte_flow_attr *attr,
3588                          const struct rte_flow_action actions[])
3589 {
3590         int queue_action = 0;
3591         int action_n = 0;
3592         int split = 0;
3593         const struct rte_flow_action_queue *queue;
3594         const struct rte_flow_action_rss *rss;
3595         const struct rte_flow_action_raw_encap *raw_encap;
3596         const struct rte_eth_hairpin_conf *conf;
3597
3598         if (!attr->ingress)
3599                 return 0;
3600         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3601                 switch (actions->type) {
3602                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3603                         queue = actions->conf;
3604                         if (queue == NULL)
3605                                 return 0;
3606                         conf = mlx5_rxq_get_hairpin_conf(dev, queue->index);
3607                         if (conf != NULL && !!conf->tx_explicit)
3608                                 return 0;
3609                         queue_action = 1;
3610                         action_n++;
3611                         break;
3612                 case RTE_FLOW_ACTION_TYPE_RSS:
3613                         rss = actions->conf;
3614                         if (rss == NULL || rss->queue_num == 0)
3615                                 return 0;
3616                         conf = mlx5_rxq_get_hairpin_conf(dev, rss->queue[0]);
3617                         if (conf != NULL && !!conf->tx_explicit)
3618                                 return 0;
3619                         queue_action = 1;
3620                         action_n++;
3621                         break;
3622                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3623                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3624                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3625                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3626                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3627                         split++;
3628                         action_n++;
3629                         break;
3630                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3631                         raw_encap = actions->conf;
3632                         if (raw_encap->size >
3633                             (sizeof(struct rte_flow_item_eth) +
3634                              sizeof(struct rte_flow_item_ipv4)))
3635                                 split++;
3636                         action_n++;
3637                         break;
3638                 default:
3639                         action_n++;
3640                         break;
3641                 }
3642         }
3643         if (split && queue_action)
3644                 return action_n;
3645         return 0;
3646 }
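/*
 * Illustrative example: an ingress flow with the actions
 *   VXLAN_ENCAP / QUEUE / END
 * returns 2 here (one splitting action plus the queue action), so the
 * caller moves the encap to the Tx side; QUEUE / END alone, or a hairpin
 * queue configured with explicit Tx rules, returns 0.
 */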
3647
3648 /* Declare flow create/destroy prototype in advance. */
3649 static uint32_t
3650 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
3651                  const struct rte_flow_attr *attr,
3652                  const struct rte_flow_item items[],
3653                  const struct rte_flow_action actions[],
3654                  bool external, struct rte_flow_error *error);
3655
3656 static void
3657 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
3658                   uint32_t flow_idx);
3659
3660 /**
3661  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3662  *
3663  * As mark_id is unique, if there's already a registered flow for the mark_id,
3664  * return by increasing the reference counter of the resource. Otherwise, create
3665  * the resource (mcp_res) and flow.
3666  *
3667  * Flow looks like,
3668  *   - If ingress port is ANY and reg_c[1] is mark_id,
3669  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3670  *
3671  * For default flow (zero mark_id), flow is like,
3672  *   - If ingress port is ANY,
3673  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
3674  *
3675  * @param dev
3676  *   Pointer to Ethernet device.
3677  * @param mark_id
3678  *   ID of MARK action, zero means default flow for META.
3679  * @param[out] error
3680  *   Perform verbose error reporting if not NULL.
3681  *
3682  * @return
3683  *   Associated resource on success, NULL otherwise and rte_errno is set.
3684  */
3685 static struct mlx5_flow_mreg_copy_resource *
3686 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
3687                           struct rte_flow_error *error)
3688 {
3689         struct mlx5_priv *priv = dev->data->dev_private;
3690         struct rte_flow_attr attr = {
3691                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3692                 .ingress = 1,
3693         };
3694         struct mlx5_rte_flow_item_tag tag_spec = {
3695                 .data = mark_id,
3696         };
3697         struct rte_flow_item items[] = {
3698                 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
3699         };
3700         struct rte_flow_action_mark ftag = {
3701                 .id = mark_id,
3702         };
3703         struct mlx5_flow_action_copy_mreg cp_mreg = {
3704                 .dst = REG_B,
3705                 .src = REG_NON,
3706         };
3707         struct rte_flow_action_jump jump = {
3708                 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3709         };
3710         struct rte_flow_action actions[] = {
3711                 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
3712         };
3713         struct mlx5_flow_mreg_copy_resource *mcp_res;
3714         uint32_t idx = 0;
3715         int ret;
3716
3717         /* Fill the register fields in the flow. */
3718         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
3719         if (ret < 0)
3720                 return NULL;
3721         tag_spec.id = ret;
3722         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3723         if (ret < 0)
3724                 return NULL;
3725         cp_mreg.src = ret;
3726         /* Check if already registered. */
3727         MLX5_ASSERT(priv->mreg_cp_tbl);
3728         mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id, NULL);
3729         if (mcp_res) {
3730                 /* For non-default rule. */
3731                 if (mark_id != MLX5_DEFAULT_COPY_ID)
3732                         mcp_res->refcnt++;
3733                 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
3734                             mcp_res->refcnt == 1);
3735                 return mcp_res;
3736         }
3737         /* Provide the full width of FLAG specific value. */
3738         if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
3739                 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3740         /* Build a new flow. */
3741         if (mark_id != MLX5_DEFAULT_COPY_ID) {
3742                 items[0] = (struct rte_flow_item){
3743                         .type = (enum rte_flow_item_type)
3744                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3745                         .spec = &tag_spec,
3746                 };
3747                 items[1] = (struct rte_flow_item){
3748                         .type = RTE_FLOW_ITEM_TYPE_END,
3749                 };
3750                 actions[0] = (struct rte_flow_action){
3751                         .type = (enum rte_flow_action_type)
3752                                 MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3753                         .conf = &ftag,
3754                 };
3755                 actions[1] = (struct rte_flow_action){
3756                         .type = (enum rte_flow_action_type)
3757                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3758                         .conf = &cp_mreg,
3759                 };
3760                 actions[2] = (struct rte_flow_action){
3761                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3762                         .conf = &jump,
3763                 };
3764                 actions[3] = (struct rte_flow_action){
3765                         .type = RTE_FLOW_ACTION_TYPE_END,
3766                 };
3767         } else {
3768                 /* Default rule, wildcard match. */
3769                 attr.priority = MLX5_FLOW_PRIO_RSVD;
3770                 items[0] = (struct rte_flow_item){
3771                         .type = RTE_FLOW_ITEM_TYPE_END,
3772                 };
3773                 actions[0] = (struct rte_flow_action){
3774                         .type = (enum rte_flow_action_type)
3775                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3776                         .conf = &cp_mreg,
3777                 };
3778                 actions[1] = (struct rte_flow_action){
3779                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3780                         .conf = &jump,
3781                 };
3782                 actions[2] = (struct rte_flow_action){
3783                         .type = RTE_FLOW_ACTION_TYPE_END,
3784                 };
3785         }
3786         /* Build a new entry. */
3787         mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3788         if (!mcp_res) {
3789                 rte_errno = ENOMEM;
3790                 return NULL;
3791         }
3792         mcp_res->idx = idx;
3793         /*
3794          * The copy Flows are not included in any list. These
3795          * ones are referenced from other Flows and cannot
3796          * be applied, removed or deleted in arbitrary order
3797          * by list traversal.
3798          */
3799         mcp_res->rix_flow = flow_list_create(dev, NULL, &attr, items,
3800                                          actions, false, error);
3801         if (!mcp_res->rix_flow)
3802                 goto error;
3803         mcp_res->refcnt++;
3804         mcp_res->hlist_ent.key = mark_id;
3805         ret = !mlx5_hlist_insert(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3806         MLX5_ASSERT(!ret);
3807         if (ret)
3808                 goto error;
3809         return mcp_res;
3810 error:
3811         if (mcp_res->rix_flow)
3812                 flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3813         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3814         return NULL;
3815 }
3816
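/*
 * Illustrative sketch (not compiled): assuming the MARK register resolves
 * to reg_c[1], for mark_id == 42 the routine above installs in RX_CP_TBL
 * a flow equivalent to:
 *
 *   pattern: TAG(id = reg_c[1], data = 42) / END
 *   actions: MARK(42) / COPY_MREG(reg_c[0] -> reg_b) /
 *            JUMP(group MLX5_FLOW_MREG_ACT_TABLE_GROUP) / END
 *
 * For MLX5_DEFAULT_COPY_ID the pattern is a wildcard (END only) and the
 * MARK action is omitted.
 */
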
3817 /**
3818  * Release flow in RX_CP_TBL.
3819  *
3820  * @param dev
3821  *   Pointer to Ethernet device.
3822  * @param flow
3823  *   Parent flow for which copying is provided.
3824  */
3825 static void
3826 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3827                           struct rte_flow *flow)
3828 {
3829         struct mlx5_flow_mreg_copy_resource *mcp_res;
3830         struct mlx5_priv *priv = dev->data->dev_private;
3831
3832         if (!flow->rix_mreg_copy)
3833                 return;
3834         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3835                                  flow->rix_mreg_copy);
3836         if (!mcp_res || !priv->mreg_cp_tbl)
3837                 return;
3838         /*
3839          * We do not check availability of metadata registers here,
3840          * because copy resources are not allocated in this case.
3841          */
3842         if (--mcp_res->refcnt)
3843                 return;
3844         MLX5_ASSERT(mcp_res->rix_flow);
3845         flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3846         mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3847         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3848         flow->rix_mreg_copy = 0;
3849 }
3850
3851 /**
3852  * Remove the default copy action from RX_CP_TBL.
3853  *
3854  * @param dev
3855  *   Pointer to Ethernet device.
3856  */
3857 static void
3858 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3859 {
3860         struct mlx5_flow_mreg_copy_resource *mcp_res;
3861         struct mlx5_priv *priv = dev->data->dev_private;
3862
3863         /* Check if default flow is registered. */
3864         if (!priv->mreg_cp_tbl)
3865                 return;
3866         mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3867                                             MLX5_DEFAULT_COPY_ID, NULL);
3868         if (!mcp_res)
3869                 return;
3870         MLX5_ASSERT(mcp_res->rix_flow);
3871         flow_list_destroy(dev, NULL, mcp_res->rix_flow);
3872         mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3873         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3874 }
3875
3876 /**
3877  * Add the default copy action in RX_CP_TBL.
3878  *
3879  * @param dev
3880  *   Pointer to Ethernet device.
3881  * @param[out] error
3882  *   Perform verbose error reporting if not NULL.
3883  *
3884  * @return
3885  *   0 for success, negative value otherwise and rte_errno is set.
3886  */
3887 static int
3888 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3889                                   struct rte_flow_error *error)
3890 {
3891         struct mlx5_priv *priv = dev->data->dev_private;
3892         struct mlx5_flow_mreg_copy_resource *mcp_res;
3893
3894         /* Check whether extensive metadata feature is engaged. */
3895         if (!priv->config.dv_flow_en ||
3896             priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3897             !mlx5_flow_ext_mreg_supported(dev) ||
3898             !priv->sh->dv_regc0_mask)
3899                 return 0;
3900         mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3901         if (!mcp_res)
3902                 return -rte_errno;
3903         return 0;
3904 }
3905
3906 /**
3907  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3908  *
3909  * All the flows having a Q/RSS action should be split by
3910  * flow_mreg_split_qrss_prep() to pass by RX_CP_TBL. A flow in the RX_CP_TBL
3911  * performs the following,
3912  *   - CQE->flow_tag := reg_c[1] (MARK)
3913  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3914  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1];
3915  * instead, there should be a flow for each MARK ID set by the MARK action.
3916  *
3917  * For the aforementioned reason, if there's a MARK action in the flow's action
3918  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3919  * the MARK ID to CQE's flow_tag like,
3920  *   - If reg_c[1] is mark_id,
3921  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3922  *
3923  * For the SET_META action, which stores its value in reg_c[0], as the destination is
3924  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3925  * MARK ID means the default flow. The default flow looks like,
3926  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3927  *
3928  * @param dev
3929  *   Pointer to Ethernet device.
3930  * @param flow
3931  *   Pointer to flow structure.
3932  * @param[in] actions
3933  *   Pointer to the list of actions.
3934  * @param[out] error
3935  *   Perform verbose error reporting if not NULL.
3936  *
3937  * @return
3938  *   0 on success, negative value otherwise and rte_errno is set.
3939  */
3940 static int
3941 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3942                             struct rte_flow *flow,
3943                             const struct rte_flow_action *actions,
3944                             struct rte_flow_error *error)
3945 {
3946         struct mlx5_priv *priv = dev->data->dev_private;
3947         struct mlx5_dev_config *config = &priv->config;
3948         struct mlx5_flow_mreg_copy_resource *mcp_res;
3949         const struct rte_flow_action_mark *mark;
3950
3951         /* Check whether extensive metadata feature is engaged. */
3952         if (!config->dv_flow_en ||
3953             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3954             !mlx5_flow_ext_mreg_supported(dev) ||
3955             !priv->sh->dv_regc0_mask)
3956                 return 0;
3957         /* Find MARK action. */
3958         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3959                 switch (actions->type) {
3960                 case RTE_FLOW_ACTION_TYPE_FLAG:
3961                         mcp_res = flow_mreg_add_copy_action
3962                                 (dev, MLX5_FLOW_MARK_DEFAULT, error);
3963                         if (!mcp_res)
3964                                 return -rte_errno;
3965                         flow->rix_mreg_copy = mcp_res->idx;
3966                         return 0;
3967                 case RTE_FLOW_ACTION_TYPE_MARK:
3968                         mark = (const struct rte_flow_action_mark *)
3969                                 actions->conf;
3970                         mcp_res =
3971                                 flow_mreg_add_copy_action(dev, mark->id, error);
3972                         if (!mcp_res)
3973                                 return -rte_errno;
3974                         flow->rix_mreg_copy = mcp_res->idx;
3975                         return 0;
3976                 default:
3977                         break;
3978                 }
3979         }
3980         return 0;
3981 }
3982
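/*
 * Usage sketch (hypothetical application-side rule): a flow created with
 *
 *   struct rte_flow_action_mark mark = { .id = 42 };
 *   struct rte_flow_action_queue queue = { .index = 0 };
 *   const struct rte_flow_action acts[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *
 * makes the routine above register a copy flow for mark ID 42 and store its
 * index in flow->rix_mreg_copy, to be released by flow_mreg_del_copy_action()
 * on flow destruction.
 */
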
3983 #define MLX5_MAX_SPLIT_ACTIONS 24
3984 #define MLX5_MAX_SPLIT_ITEMS 24
3985
3986 /**
3987  * Split the hairpin flow.
3988  * Since HW can't support encap and push-vlan on Rx, we move these
3989  * actions to Tx.
3990  * If the count action is after the encap then we also
3991  * move the count action. In this case the count will also measure
3992  * the outer bytes.
3993  *
3994  * @param dev
3995  *   Pointer to Ethernet device.
3996  * @param[in] actions
3997  *   Associated actions (list terminated by the END action).
3998  * @param[out] actions_rx
3999  *   Rx flow actions.
4000  * @param[out] actions_tx
4001  *   Tx flow actions.
4002  * @param[out] pattern_tx
4003  *   The pattern items for the Tx flow.
4004  * @param[out] flow_id
4005  *   The flow ID connected to this flow.
4006  *
4007  * @return
4008  *   0 on success.
4009  */
4010 static int
4011 flow_hairpin_split(struct rte_eth_dev *dev,
4012                    const struct rte_flow_action actions[],
4013                    struct rte_flow_action actions_rx[],
4014                    struct rte_flow_action actions_tx[],
4015                    struct rte_flow_item pattern_tx[],
4016                    uint32_t flow_id)
4017 {
4018         const struct rte_flow_action_raw_encap *raw_encap;
4019         const struct rte_flow_action_raw_decap *raw_decap;
4020         struct mlx5_rte_flow_action_set_tag *set_tag;
4021         struct rte_flow_action *tag_action;
4022         struct mlx5_rte_flow_item_tag *tag_item;
4023         struct rte_flow_item *item;
4024         char *addr;
4025         int encap = 0;
4026
4027         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4028                 switch (actions->type) {
4029                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4030                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4031                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4032                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4033                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
4034                         rte_memcpy(actions_tx, actions,
4035                                sizeof(struct rte_flow_action));
4036                         actions_tx++;
4037                         break;
4038                 case RTE_FLOW_ACTION_TYPE_COUNT:
4039                         if (encap) {
4040                                 rte_memcpy(actions_tx, actions,
4041                                            sizeof(struct rte_flow_action));
4042                                 actions_tx++;
4043                         } else {
4044                                 rte_memcpy(actions_rx, actions,
4045                                            sizeof(struct rte_flow_action));
4046                                 actions_rx++;
4047                         }
4048                         break;
4049                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4050                         raw_encap = actions->conf;
4051                         if (raw_encap->size >
4052                             (sizeof(struct rte_flow_item_eth) +
4053                              sizeof(struct rte_flow_item_ipv4))) {
4054                                 memcpy(actions_tx, actions,
4055                                        sizeof(struct rte_flow_action));
4056                                 actions_tx++;
4057                                 encap = 1;
4058                         } else {
4059                                 rte_memcpy(actions_rx, actions,
4060                                            sizeof(struct rte_flow_action));
4061                                 actions_rx++;
4062                         }
4063                         break;
4064                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4065                         raw_decap = actions->conf;
4066                         if (raw_decap->size <
4067                             (sizeof(struct rte_flow_item_eth) +
4068                              sizeof(struct rte_flow_item_ipv4))) {
4069                                 memcpy(actions_tx, actions,
4070                                        sizeof(struct rte_flow_action));
4071                                 actions_tx++;
4072                         } else {
4073                                 rte_memcpy(actions_rx, actions,
4074                                            sizeof(struct rte_flow_action));
4075                                 actions_rx++;
4076                         }
4077                         break;
4078                 default:
4079                         rte_memcpy(actions_rx, actions,
4080                                    sizeof(struct rte_flow_action));
4081                         actions_rx++;
4082                         break;
4083                 }
4084         }
4085         /* Add set meta action and end action for the Rx flow. */
4086         tag_action = actions_rx;
4087         tag_action->type = (enum rte_flow_action_type)
4088                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4089         actions_rx++;
4090         rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
4091         actions_rx++;
4092         set_tag = (void *)actions_rx;
4093         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
4094         MLX5_ASSERT(set_tag->id > REG_NON);
4095         set_tag->data = flow_id;
4096         tag_action->conf = set_tag;
4097         /* Create Tx item list. */
4098         rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
4099         addr = (void *)&pattern_tx[2];
4100         item = pattern_tx;
4101         item->type = (enum rte_flow_item_type)
4102                      MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4103         tag_item = (void *)addr;
4104         tag_item->data = flow_id;
4105         tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
4106                 MLX5_ASSERT(tag_item->id > REG_NON);
4107         item->spec = tag_item;
4108         addr += sizeof(struct mlx5_rte_flow_item_tag);
4109         tag_item = (void *)addr;
4110         tag_item->data = UINT32_MAX;
4111         tag_item->id = UINT16_MAX;
4112         item->mask = tag_item;
4113         item->last = NULL;
4114         item++;
4115         item->type = RTE_FLOW_ITEM_TYPE_END;
4116         return 0;
4117 }
4118
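/*
 * Illustrative sketch: for a hairpin flow with
 * actions = RAW_ENCAP(size > eth + ipv4) / COUNT / QUEUE / END,
 * the split above yields:
 *   actions_rx: QUEUE / TAG(set hairpin Rx reg, data = flow_id) / END
 *   actions_tx: RAW_ENCAP / COUNT / END
 *   pattern_tx: TAG(spec: data = flow_id; mask: data = UINT32_MAX) / END
 * The COUNT action follows the encap to the Tx flow, so it also measures
 * the outer bytes.
 */
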
4119 __extension__
4120 union tunnel_offload_mark {
4121         uint32_t val;
4122         struct {
4123                 uint32_t app_reserve:8;
4124                 uint32_t table_id:15;
4125                 uint32_t transfer:1;
4126                 uint32_t _unused_:8;
4127         };
4128 };
4129
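/*
 * Example for the tunnel_offload_mark union above (assuming the usual
 * little-endian bit-field layout): table_id 5 on the transfer domain
 * encodes as val = (5 << 8) | (1u << 23), leaving the low 8 bits
 * (app_reserve) to the application.
 */
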
4130 struct tunnel_default_miss_ctx {
4131         uint16_t *queue;
4132         __extension__
4133         union {
4134                 struct rte_flow_action_rss action_rss;
4135                 struct rte_flow_action_queue miss_queue;
4136                 struct rte_flow_action_jump miss_jump;
4137                 uint8_t raw[0];
4138         };
4139 };
4140
4141 static int
4142 flow_tunnel_add_default_miss(struct rte_eth_dev *dev,
4143                              struct rte_flow *flow,
4144                              const struct rte_flow_attr *attr,
4145                              const struct rte_flow_action *app_actions,
4146                              uint32_t flow_idx,
4147                              struct tunnel_default_miss_ctx *ctx,
4148                              struct rte_flow_error *error)
4149 {
4150         struct mlx5_priv *priv = dev->data->dev_private;
4151         struct mlx5_flow *dev_flow;
4152         struct rte_flow_attr miss_attr = *attr;
4153         const struct mlx5_flow_tunnel *tunnel = app_actions[0].conf;
4154         const struct rte_flow_item miss_items[2] = {
4155                 {
4156                         .type = RTE_FLOW_ITEM_TYPE_ETH,
4157                         .spec = NULL,
4158                         .last = NULL,
4159                         .mask = NULL
4160                 },
4161                 {
4162                         .type = RTE_FLOW_ITEM_TYPE_END,
4163                         .spec = NULL,
4164                         .last = NULL,
4165                         .mask = NULL
4166                 }
4167         };
4168         union tunnel_offload_mark mark_id;
4169         struct rte_flow_action_mark miss_mark;
4170         struct rte_flow_action miss_actions[3] = {
4171                 [0] = { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &miss_mark },
4172                 [2] = { .type = RTE_FLOW_ACTION_TYPE_END,  .conf = NULL }
4173         };
4174         const struct rte_flow_action_jump *jump_data;
4175         uint32_t i, flow_table = 0; /* prevent compilation warning */
4176         struct flow_grp_info grp_info = {
4177                 .external = 1,
4178                 .transfer = attr->transfer,
4179                 .fdb_def_rule = !!priv->fdb_def_rule,
4180                 .std_tbl_fix = 0,
4181         };
4182         int ret;
4183
4184         if (!attr->transfer) {
4185                 uint32_t q_size;
4186
4187                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_RSS;
4188                 q_size = priv->reta_idx_n * sizeof(ctx->queue[0]);
4189                 ctx->queue = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, q_size,
4190                                          0, SOCKET_ID_ANY);
4191                 if (!ctx->queue)
4192                         return rte_flow_error_set
4193                                 (error, ENOMEM,
4194                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4195                                 NULL, "invalid default miss RSS");
4196                 ctx->action_rss.func = RTE_ETH_HASH_FUNCTION_DEFAULT;
4197                 ctx->action_rss.level = 0;
4198                 ctx->action_rss.types = priv->rss_conf.rss_hf;
4199                 ctx->action_rss.key_len = priv->rss_conf.rss_key_len;
4200                 ctx->action_rss.queue_num = priv->reta_idx_n;
4201                 ctx->action_rss.key = priv->rss_conf.rss_key;
4202                 ctx->action_rss.queue = ctx->queue;
4203                 if (!priv->reta_idx_n || !priv->rxqs_n)
4204                         return rte_flow_error_set
4205                                 (error, EINVAL,
4206                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4207                                 NULL, "invalid port configuration");
4208                 if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
4209                         ctx->action_rss.types = 0;
4210                 for (i = 0; i != priv->reta_idx_n; ++i)
4211                         ctx->queue[i] = (*priv->reta_idx)[i];
4212         } else {
4213                 miss_actions[1].type = RTE_FLOW_ACTION_TYPE_JUMP;
4214                 ctx->miss_jump.group = MLX5_TNL_MISS_FDB_JUMP_GRP;
4215         }
4216         miss_actions[1].conf = (typeof(miss_actions[1].conf))ctx->raw;
4217         for (; app_actions->type != RTE_FLOW_ACTION_TYPE_JUMP; app_actions++);
4218         jump_data = app_actions->conf;
4219         miss_attr.priority = MLX5_TNL_MISS_RULE_PRIORITY;
4220         miss_attr.group = jump_data->group;
4221         ret = mlx5_flow_group_to_table(dev, tunnel, jump_data->group,
4222                                        &flow_table, grp_info, error);
4223         if (ret)
4224                 return rte_flow_error_set(error, EINVAL,
4225                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
4226                                           NULL, "invalid tunnel id");
4227         mark_id.app_reserve = 0;
4228         mark_id.table_id = tunnel_flow_tbl_to_id(flow_table);
4229         mark_id.transfer = !!attr->transfer;
4230         mark_id._unused_ = 0;
4231         miss_mark.id = mark_id.val;
4232         dev_flow = flow_drv_prepare(dev, flow, &miss_attr,
4233                                     miss_items, miss_actions, flow_idx, error);
4234         if (!dev_flow)
4235                 return -rte_errno;
4236         dev_flow->flow = flow;
4237         dev_flow->external = true;
4238         dev_flow->tunnel = tunnel;
4239         /* Subflow object was created, we must include it in the list. */
4240         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4241                       dev_flow->handle, next);
4242         DRV_LOG(DEBUG,
4243                 "port %u tunnel type=%d id=%u miss rule priority=%u group=%u",
4244                 dev->data->port_id, tunnel->app_tunnel.type,
4245                 tunnel->tunnel_id, miss_attr.priority, miss_attr.group);
4246         ret = flow_drv_translate(dev, dev_flow, &miss_attr, miss_items,
4247                                   miss_actions, error);
4248         if (!ret)
4249                 ret = flow_mreg_update_copy_table(dev, flow, miss_actions,
4250                                                   error);
4251
4252         return ret;
4253 }
4254
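/*
 * Illustrative sketch: for a non-transfer tunnel rule jumping to group G,
 * the default-miss subflow built above matches any Ethernet packet:
 *   pattern: ETH / END
 *   actions: MARK(id encodes {table_id(G), transfer = 0}) / RSS / END
 * With transfer attributes the RSS action is replaced by
 * JUMP(MLX5_TNL_MISS_FDB_JUMP_GRP).
 */
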
4255 /**
4256  * The last stage of splitting chain, just creates the subflow
4257  * without any modification.
4258  *
4259  * @param[in] dev
4260  *   Pointer to Ethernet device.
4261  * @param[in] flow
4262  *   Parent flow structure pointer.
4263  * @param[in, out] sub_flow
4264  *   Pointer to return the created subflow, may be NULL.
4265  * @param[in] prefix_layers
4266  *   Prefix subflow layers, may be 0.
4267  * @param[in] prefix_mark
4268  *   Prefix subflow mark flag, may be 0.
4269  * @param[in] attr
4270  *   Flow rule attributes.
4271  * @param[in] items
4272  *   Pattern specification (list terminated by the END pattern item).
4273  * @param[in] actions
4274  *   Associated actions (list terminated by the END action).
4275  * @param[in] external
4276  *   This flow rule is created by a request external to the PMD.
4277  * @param[in] flow_idx
4278  *   The memory pool index of the flow.
4279  * @param[out] error
4280  *   Perform verbose error reporting if not NULL.
4281  * @return
4282  *   0 on success, negative value otherwise
4283  */
4284 static int
4285 flow_create_split_inner(struct rte_eth_dev *dev,
4286                         struct rte_flow *flow,
4287                         struct mlx5_flow **sub_flow,
4288                         uint64_t prefix_layers,
4289                         uint32_t prefix_mark,
4290                         const struct rte_flow_attr *attr,
4291                         const struct rte_flow_item items[],
4292                         const struct rte_flow_action actions[],
4293                         bool external, uint32_t flow_idx,
4294                         struct rte_flow_error *error)
4295 {
4296         struct mlx5_flow *dev_flow;
4297
4298         dev_flow = flow_drv_prepare(dev, flow, attr, items, actions,
4299                 flow_idx, error);
4300         if (!dev_flow)
4301                 return -rte_errno;
4302         dev_flow->flow = flow;
4303         dev_flow->external = external;
4304         /* Subflow object was created, we must include it in the list. */
4305         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4306                       dev_flow->handle, next);
4307         /*
4308          * If dev_flow is one of the suffix flows, some actions in the suffix
4309          * flow may need some user defined item layer flags, so pass the
4310          * metadata rxq mark flag to the suffix flow as well.
4311          */
4312         if (prefix_layers)
4313                 dev_flow->handle->layers = prefix_layers;
4314         if (prefix_mark)
4315                 dev_flow->handle->mark = 1;
4316         if (sub_flow)
4317                 *sub_flow = dev_flow;
4318         return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
4319 }
4320
4321 /**
4322  * Split the meter flow.
4323  *
4324  * As the meter flow will be split into three sub flows, the actions
4325  * other than the meter action only make sense when the meter accepts
4326  * the packet. If the packet is to be dropped, no additional
4327  * actions should be taken.
4328  *
4329  * One kind of special action which decapsulates the L3 tunnel
4330  * header will be put in the prefix sub flow, so as not to take the
4331  * L3 tunnel header into account.
4332  *
4333  * @param dev
4334  *   Pointer to Ethernet device.
4335  * @param[in] items
4336  *   Pattern specification (list terminated by the END pattern item).
4337  * @param[out] sfx_items
4338  *   Suffix flow match items (list terminated by the END pattern item).
4339  * @param[in] actions
4340  *   Associated actions (list terminated by the END action).
4341  * @param[out] actions_sfx
4342  *   Suffix flow actions.
4343  * @param[out] actions_pre
4344  *   Prefix flow actions.
4345  *
4346  * @return
4347  *   The allocated tag ID on success, 0 otherwise.
4352  */
4353 static int
4354 flow_meter_split_prep(struct rte_eth_dev *dev,
4355                  const struct rte_flow_item items[],
4356                  struct rte_flow_item sfx_items[],
4357                  const struct rte_flow_action actions[],
4358                  struct rte_flow_action actions_sfx[],
4359                  struct rte_flow_action actions_pre[])
4360 {
4361         struct mlx5_priv *priv = dev->data->dev_private;
4362         struct rte_flow_action *tag_action = NULL;
4363         struct rte_flow_item *tag_item;
4364         struct mlx5_rte_flow_action_set_tag *set_tag;
4365         struct rte_flow_error error;
4366         const struct rte_flow_action_raw_encap *raw_encap;
4367         const struct rte_flow_action_raw_decap *raw_decap;
4368         struct mlx5_rte_flow_item_tag *tag_spec;
4369         struct mlx5_rte_flow_item_tag *tag_mask;
4370         uint32_t tag_id = 0;
4371         bool copy_vlan = false;
4372
4373         /* Prepare the actions for prefix and suffix flow. */
4374         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4375                 struct rte_flow_action **action_cur = NULL;
4376
4377                 switch (actions->type) {
4378                 case RTE_FLOW_ACTION_TYPE_METER:
4379                         /* Add the extra tag action first. */
4380                         tag_action = actions_pre;
4381                         tag_action->type = (enum rte_flow_action_type)
4382                                            MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4383                         actions_pre++;
4384                         action_cur = &actions_pre;
4385                         break;
4386                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4387                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4388                         action_cur = &actions_pre;
4389                         break;
4390                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4391                         raw_encap = actions->conf;
4392                         if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
4393                                 action_cur = &actions_pre;
4394                         break;
4395                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4396                         raw_decap = actions->conf;
4397                         if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
4398                                 action_cur = &actions_pre;
4399                         break;
4400                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
4401                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
4402                         copy_vlan = true;
4403                         break;
4404                 default:
4405                         break;
4406                 }
4407                 if (!action_cur)
4408                         action_cur = &actions_sfx;
4409                 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
4410                 (*action_cur)++;
4411         }
4412         /* Add end action to the actions. */
4413         actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
4414         actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
4415         actions_pre++;
4416         /* Set the tag. */
4417         set_tag = (void *)actions_pre;
4418         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4419         mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
4420                           &tag_id);
4421         if (tag_id >= (1 << (sizeof(tag_id) * 8 - MLX5_MTR_COLOR_BITS))) {
4422                 DRV_LOG(ERR, "Port %u meter flow id exceeds max limit.",
4423                         dev->data->port_id);
4424                 mlx5_ipool_free(priv->sh->ipool
4425                                 [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], tag_id);
4426                 return 0;
4427         } else if (!tag_id) {
4428                 return 0;
4429         }
4430         set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
4431         MLX5_ASSERT(tag_action);
4432         tag_action->conf = set_tag;
4433         /* Prepare the suffix subflow items. */
4434         tag_item = sfx_items++;
4435         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4436                 int item_type = items->type;
4437
4438                 switch (item_type) {
4439                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4440                         memcpy(sfx_items, items, sizeof(*sfx_items));
4441                         sfx_items++;
4442                         break;
4443                 case RTE_FLOW_ITEM_TYPE_VLAN:
4444                         if (copy_vlan) {
4445                                 memcpy(sfx_items, items, sizeof(*sfx_items));
4446                                 /*
4447                                  * Convert to an internal match item; it is
4448                                  * used for VLAN push and set VID.
4449                                  */
4450                                 sfx_items->type = (enum rte_flow_item_type)
4451                                                   MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
4452                                 sfx_items++;
4453                         }
4454                         break;
4455                 default:
4456                         break;
4457                 }
4458         }
4459         sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
4460         sfx_items++;
4461         tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
4462         tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
4463         tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
4464         tag_mask = tag_spec + 1;
4465         tag_mask->data = 0xffffff00;
4466         tag_item->type = (enum rte_flow_item_type)
4467                          MLX5_RTE_FLOW_ITEM_TYPE_TAG;
4468         tag_item->spec = tag_spec;
4469         tag_item->last = NULL;
4470         tag_item->mask = tag_mask;
4471         return tag_id;
4472 }
4473
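/*
 * Illustrative sketch: for actions = METER / QUEUE / END the routine above
 * produces (with the tag register resolved via MLX5_MTR_SFX):
 *   actions_pre: TAG(set, data = tag_id << MLX5_MTR_COLOR_BITS) /
 *                METER / END
 *   actions_sfx: QUEUE / END
 *   sfx_items:   TAG(spec: data = tag_id << MLX5_MTR_COLOR_BITS;
 *                    mask: data = 0xffffff00) / END
 * plus any PORT_ID/VLAN items copied from the original pattern.
 */
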
4474 /**
4475  * Split action list having QUEUE/RSS for metadata register copy.
4476  *
4477  * Once Q/RSS action is detected in user's action list, the flow action
4478  * should be split in order to copy metadata registers, which will happen in
4479  * RX_CP_TBL like,
4480  *   - CQE->flow_tag := reg_c[1] (MARK)
4481  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
4482  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
4483  * This is because the last action of each flow must be a terminal action
4484  * (QUEUE, RSS or DROP).
4485  *
4486  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
4487  * stored and kept in the mlx5_flow structure for each sub_flow.
4488  *
4489  * The Q/RSS action is replaced with,
4490  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
4491  * And the following JUMP action is added at the end,
4492  *   - JUMP, to RX_CP_TBL.
4493  *
4494  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL
4495  * by the flow_create_split_metadata() routine. The flow will look like,
4496  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
4497  *
4498  * @param dev
4499  *   Pointer to Ethernet device.
4500  * @param[out] split_actions
4501  *   Pointer to store split actions to jump to CP_TBL.
4502  * @param[in] actions
4503  *   Pointer to the list of original flow actions.
4504  * @param[in] qrss
4505  *   Pointer to the Q/RSS action.
4506  * @param[in] actions_n
4507  *   Number of original actions.
4508  * @param[out] error
4509  *   Perform verbose error reporting if not NULL.
4510  *
4511  * @return
4512  *   Non-zero unique flow_id on success, otherwise 0 and
4513  *   error/rte_errno are set.
4514  */
4515 static uint32_t
4516 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
4517                           struct rte_flow_action *split_actions,
4518                           const struct rte_flow_action *actions,
4519                           const struct rte_flow_action *qrss,
4520                           int actions_n, struct rte_flow_error *error)
4521 {
4522         struct mlx5_priv *priv = dev->data->dev_private;
4523         struct mlx5_rte_flow_action_set_tag *set_tag;
4524         struct rte_flow_action_jump *jump;
4525         const int qrss_idx = qrss - actions;
4526         uint32_t flow_id = 0;
4527         int ret = 0;
4528
4529         /*
4530          * The given actions will be split:
4531          * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
4532          * - Add jump to mreg CP_TBL.
4533          * As a result, there will be one more action.
4534          */
4535         ++actions_n;
4536         memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
4537         set_tag = (void *)(split_actions + actions_n);
4538         /*
4539          * If the tag action is not set to void (it means we are not the meter
4540          * suffix flow), add the tag action, since the meter suffix flow
4541          * already has the tag added.
4542          */
4543         if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
4544                 /*
4545                  * Allocate the new subflow ID. This one is unique within
4546                  * the device and not shared with representors. Otherwise,
4547                  * we would have to resolve multi-thread access synch
4548                  * issue. Each flow on the shared device is appended
4549                  * with source vport identifier, so the resulting
4550                  * flows will be unique in the shared (by master and
4551                  * representors) domain even if they have coinciding
4552                  * IDs.
4553                  */
4554                 mlx5_ipool_malloc(priv->sh->ipool
4555                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &flow_id);
4556                 if (!flow_id)
4557                         return rte_flow_error_set(error, ENOMEM,
4558                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4559                                                   NULL, "can't allocate id "
4560                                                   "for split Q/RSS subflow");
4561                 /* Internal SET_TAG action to set flow ID. */
4562                 *set_tag = (struct mlx5_rte_flow_action_set_tag){
4563                         .data = flow_id,
4564                 };
4565                 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
4566                 if (ret < 0)
4567                         return ret;
4568                 set_tag->id = ret;
4569                 /* Construct new actions array. */
4570                 /* Replace QUEUE/RSS action. */
4571                 split_actions[qrss_idx] = (struct rte_flow_action){
4572                         .type = (enum rte_flow_action_type)
4573                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4574                         .conf = set_tag,
4575                 };
4576         }
4577         /* JUMP action to jump to mreg copy table (CP_TBL). */
4578         jump = (void *)(set_tag + 1);
4579         *jump = (struct rte_flow_action_jump){
4580                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
4581         };
4582         split_actions[actions_n - 2] = (struct rte_flow_action){
4583                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
4584                 .conf = jump,
4585         };
4586         split_actions[actions_n - 1] = (struct rte_flow_action){
4587                 .type = RTE_FLOW_ACTION_TYPE_END,
4588         };
4589         return flow_id;
4590 }
4591
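/*
 * Illustrative sketch: for actions = MARK / QUEUE / END the routine above
 * builds (the tag register is resolved via MLX5_COPY_MARK, reg_c[2] per
 * the description above):
 *   split_actions: MARK / TAG(set reg_c[2] = flow_id) /
 *                  JUMP(MLX5_FLOW_MREG_CP_TABLE_GROUP) / END
 * The QUEUE action itself is re-applied later in RX_ACT_TBL by
 * flow_create_split_metadata().
 */
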
4592 /**
4593  * Extend the given action list for Tx metadata copy.
4594  *
4595  * Copy the given action list to the ext_actions and add flow metadata register
4596  * copy action in order to copy reg_a set by WQE to reg_c[0].
4597  *
4598  * @param[out] ext_actions
4599  *   Pointer to the extended action list.
4600  * @param[in] actions
4601  *   Pointer to the list of actions.
4602  * @param[in] actions_n
4603  *   Number of actions in the list.
4604  * @param[out] error
4605  *   Perform verbose error reporting if not NULL.
4606  * @param[in] encap_idx
4607  *   The encap action index.
4608  *
4609  * @return
4610  *   0 on success, negative value otherwise
4611  */
4612 static int
4613 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
4614                        struct rte_flow_action *ext_actions,
4615                        const struct rte_flow_action *actions,
4616                        int actions_n, struct rte_flow_error *error,
4617                        int encap_idx)
4618 {
4619         struct mlx5_flow_action_copy_mreg *cp_mreg =
4620                 (struct mlx5_flow_action_copy_mreg *)
4621                         (ext_actions + actions_n + 1);
4622         int ret;
4623
4624         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
4625         if (ret < 0)
4626                 return ret;
4627         cp_mreg->dst = ret;
4628         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
4629         if (ret < 0)
4630                 return ret;
4631         cp_mreg->src = ret;
4632         if (encap_idx != 0)
4633                 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
4634         if (encap_idx == actions_n - 1) {
4635                 ext_actions[actions_n - 1] = (struct rte_flow_action){
4636                         .type = (enum rte_flow_action_type)
4637                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4638                         .conf = cp_mreg,
4639                 };
4640                 ext_actions[actions_n] = (struct rte_flow_action){
4641                         .type = RTE_FLOW_ACTION_TYPE_END,
4642                 };
4643         } else {
4644                 ext_actions[encap_idx] = (struct rte_flow_action){
4645                         .type = (enum rte_flow_action_type)
4646                                 MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
4647                         .conf = cp_mreg,
4648                 };
4649                 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
4650                                 sizeof(*ext_actions) * (actions_n - encap_idx));
4651         }
4652         return 0;
4653 }
4654
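/*
 * Illustrative sketch: on NIC Tx with actions = SET_META / END
 * (actions_n == 2) and no encap action (encap_idx here assumed to be
 * actions_n - 1 in that case), the routine above builds:
 *   ext_actions: SET_META / COPY_MREG(reg_a -> reg_c[0]) / END
 */
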
4655 /**
4656  * Check the match action from the action list.
4657  *
4658  * @param[in] actions
4659  *   Pointer to the list of actions.
4660  * @param[in] attr
4661  *   Flow rule attributes.
4662  * @param[in] action
4663  *   The action to check for existence.
4664  * @param[out] match_action_pos
4665  *   Pointer to the position of the matched action if it exists, otherwise -1.
4666  * @param[out] qrss_action_pos
4667  *   Pointer to the position of the Queue/RSS action if it exists, otherwise -1.
4668  *
4669  * @return
4670  *   > 0 the total number of actions.
4671  *   0 if the match action is not found in the action list.
4672  */
4673 static int
4674 flow_check_match_action(const struct rte_flow_action actions[],
4675                         const struct rte_flow_attr *attr,
4676                         enum rte_flow_action_type action,
4677                         int *match_action_pos, int *qrss_action_pos)
4678 {
4679         const struct rte_flow_action_sample *sample;
4680         int actions_n = 0;
4681         int jump_flag = 0;
4682         uint32_t ratio = 0;
4683         int sub_type = 0;
4684         int flag = 0;
4685
4686         *match_action_pos = -1;
4687         *qrss_action_pos = -1;
4688         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4689                 if (actions->type == action) {
4690                         flag = 1;
4691                         *match_action_pos = actions_n;
4692                 }
4693                 if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE ||
4694                     actions->type == RTE_FLOW_ACTION_TYPE_RSS)
4695                         *qrss_action_pos = actions_n;
4696                 if (actions->type == RTE_FLOW_ACTION_TYPE_JUMP)
4697                         jump_flag = 1;
4698                 if (actions->type == RTE_FLOW_ACTION_TYPE_SAMPLE) {
4699                         sample = actions->conf;
4700                         ratio = sample->ratio;
4701                         sub_type = ((const struct rte_flow_action *)
4702                                         (sample->actions))->type;
4703                 }
4704                 actions_n++;
4705         }
4706         if (flag && action == RTE_FLOW_ACTION_TYPE_SAMPLE && attr->transfer) {
4707                 if (ratio == 1) {
4708                         /* The JUMP action is not supported for mirroring;
4709                          * mirroring supports multi-destination.
4710                          */
4711                         if (!jump_flag && sub_type != RTE_FLOW_ACTION_TYPE_END)
4712                                 flag = 0;
4713                 }
4714         }
4715         /* Count RTE_FLOW_ACTION_TYPE_END. */
4716         return flag ? actions_n + 1 : 0;
4717 }
4718
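/*
 * Usage sketch: for a non-transfer rule with actions = QUEUE / SAMPLE / END,
 * flow_check_match_action(actions, attr, RTE_FLOW_ACTION_TYPE_SAMPLE,
 * &match_pos, &qrss_pos) returns 3 (END included), with match_pos == 1 and
 * qrss_pos == 0.
 */
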
4719 #define SAMPLE_SUFFIX_ITEM 2
4720
4721 /**
4722  * Split the sample flow.
4723  *
4724  * The sample flow will be split into two sub flows: the prefix flow
4725  * with the sample action, and the suffix flow with the other actions.
4726  *
4727  * A unique tag ID is also added with a tag action in the sample flow;
4728  * the same tag ID is used as a match in the suffix flow.
4729  *
4730  * @param dev
4731  *   Pointer to Ethernet device.
4732  * @param[in] fdb_tx
4733  *   FDB egress flow flag.
4734  * @param[out] sfx_items
4735  *   Suffix flow match items (list terminated by the END pattern item).
4736  * @param[in] actions
4737  *   Associated actions (list terminated by the END action).
4738  * @param[out] actions_sfx
4739  *   Suffix flow actions.
4740  * @param[out] actions_pre
4741  *   Prefix flow actions.
4742  * @param[in] actions_n
4743  *   The total number of actions.
4744  * @param[in] sample_action_pos
4745  *   The sample action position.
4746  * @param[in] qrss_action_pos
4747  *   The Queue/RSS action position.
4748  * @param[out] error
4749  *   Perform verbose error reporting if not NULL.
4750  *
4751  * @return
4752  *   0 or a unique flow_id on success, a negative errno value
4753  *   otherwise and rte_errno is set.
4754  */
4755 static int
4756 flow_sample_split_prep(struct rte_eth_dev *dev,
4757                        uint32_t fdb_tx,
4758                        struct rte_flow_item sfx_items[],
4759                        const struct rte_flow_action actions[],
4760                        struct rte_flow_action actions_sfx[],
4761                        struct rte_flow_action actions_pre[],
4762                        int actions_n,
4763                        int sample_action_pos,
4764                        int qrss_action_pos,
4765                        struct rte_flow_error *error)
4766 {
4767         struct mlx5_priv *priv = dev->data->dev_private;
4768         struct mlx5_rte_flow_action_set_tag *set_tag;
4769         struct mlx5_rte_flow_item_tag *tag_spec;
4770         struct mlx5_rte_flow_item_tag *tag_mask;
4771         uint32_t tag_id = 0;
4772         int index;
4773         int ret;
4774
4775         if (sample_action_pos < 0)
4776                 return rte_flow_error_set(error, EINVAL,
4777                                           RTE_FLOW_ERROR_TYPE_ACTION,
4778                                           NULL, "invalid position of sample "
4779                                           "action in list");
4780         if (!fdb_tx) {
4781                 /* Prepare the prefix tag action. */
4782                 set_tag = (void *)(actions_pre + actions_n + 1);
4783                 ret = mlx5_flow_get_reg_id(dev, MLX5_APP_TAG, 0, error);
4784                 if (ret < 0)
4785                         return ret;
4786                 set_tag->id = ret;
4787                 mlx5_ipool_malloc(priv->sh->ipool
4788                                   [MLX5_IPOOL_RSS_EXPANTION_FLOW_ID], &tag_id);
4789                 set_tag->data = tag_id;
4790                 /* Prepare the suffix subflow items. */
4791                 tag_spec = (void *)(sfx_items + SAMPLE_SUFFIX_ITEM);
4792                 tag_spec->data = tag_id;
4793                 tag_spec->id = set_tag->id;
4794                 tag_mask = tag_spec + 1;
4795                 tag_mask->data = UINT32_MAX;
4796                 sfx_items[0] = (struct rte_flow_item){
4797                         .type = (enum rte_flow_item_type)
4798                                 MLX5_RTE_FLOW_ITEM_TYPE_TAG,
4799                         .spec = tag_spec,
4800                         .last = NULL,
4801                         .mask = tag_mask,
4802                 };
4803                 sfx_items[1] = (struct rte_flow_item){
4804                         .type = (enum rte_flow_item_type)
4805                                 RTE_FLOW_ITEM_TYPE_END,
4806                 };
4807         }
4808         /* Prepare the actions for prefix and suffix flow. */
4809         if (qrss_action_pos >= 0 && qrss_action_pos < sample_action_pos) {
4810                 index = qrss_action_pos;
4811                 /* Put the actions preceding the Queue/RSS action into prefix flow. */
4812                 if (index != 0)
4813                         memcpy(actions_pre, actions,
4814                                sizeof(struct rte_flow_action) * index);
4815                 /* Put other actions preceding the sample action into prefix flow. */
4816                 if (sample_action_pos > index + 1)
4817                         memcpy(actions_pre + index, actions + index + 1,
4818                                sizeof(struct rte_flow_action) *
4819                                (sample_action_pos - index - 1));
4820                 index = sample_action_pos - 1;
4821                 /* Put Queue/RSS action into Suffix flow. */
4822                 memcpy(actions_sfx, actions + qrss_action_pos,
4823                        sizeof(struct rte_flow_action));
4824                 actions_sfx++;
4825         } else {
4826                 index = sample_action_pos;
4827                 if (index != 0)
4828                         memcpy(actions_pre, actions,
4829                                sizeof(struct rte_flow_action) * index);
4830         }
4831         /* Add the extra tag action for NIC-RX and E-Switch ingress. */
4832         if (!fdb_tx) {
4833                 actions_pre[index++] =
4834                         (struct rte_flow_action){
4835                         .type = (enum rte_flow_action_type)
4836                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG,
4837                         .conf = set_tag,
4838                 };
4839         }
4840         memcpy(actions_pre + index, actions + sample_action_pos,
4841                sizeof(struct rte_flow_action));
4842         index += 1;
4843         actions_pre[index] = (struct rte_flow_action){
4844                 .type = (enum rte_flow_action_type)
4845                         RTE_FLOW_ACTION_TYPE_END,
4846         };
4847         /* Put the actions after sample into Suffix flow. */
4848         memcpy(actions_sfx, actions + sample_action_pos + 1,
4849                sizeof(struct rte_flow_action) *
4850                (actions_n - sample_action_pos - 1));
4851         return tag_id;
4852 }
4853
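/*
 * Illustrative sketch: for a NIC-Rx rule (fdb_tx == 0) with
 * actions = SAMPLE / QUEUE / END (sample_action_pos == 0,
 * qrss_action_pos == 1), the routine above produces:
 *   actions_pre: TAG(set, data = tag_id) / SAMPLE / END
 *   actions_sfx: QUEUE / END
 *   sfx_items:   TAG(spec: data = tag_id; mask: data = UINT32_MAX) / END
 */
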
4854 /**
4855  * The splitting for metadata feature.
4856  *
4857  * - Q/RSS action on NIC Rx should be split in order to pass by
4858  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
4859  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
4860  *
4861  * - All the actions on NIC Tx should have a mreg copy action to
4862  *   copy reg_a from WQE to reg_c[0].
4863  *
4864  * @param dev
4865  *   Pointer to Ethernet device.
4866  * @param[in] flow
4867  *   Parent flow structure pointer.
4868  * @param[in] prefix_layers
4869  *   Prefix flow layer flags.
4870  * @param[in] prefix_mark
4871  *   Prefix subflow mark flag, may be 0.
4872  * @param[in] attr
4873  *   Flow rule attributes.
4874  * @param[in] items
4875  *   Pattern specification (list terminated by the END pattern item).
4876  * @param[in] actions
4877  *   Associated actions (list terminated by the END action).
4878  * @param[in] external
4879  *   This flow rule is created by a request external to the PMD.
4880  * @param[in] flow_idx
4881  *   The memory pool index of the flow.
4882  * @param[out] error
4883  *   Perform verbose error reporting if not NULL.
4884  * @return
4885  *   0 on success, negative value otherwise
4886  */
4887 static int
4888 flow_create_split_metadata(struct rte_eth_dev *dev,
4889                            struct rte_flow *flow,
4890                            uint64_t prefix_layers,
4891                            uint32_t prefix_mark,
4892                            const struct rte_flow_attr *attr,
4893                            const struct rte_flow_item items[],
4894                            const struct rte_flow_action actions[],
4895                            bool external, uint32_t flow_idx,
4896                            struct rte_flow_error *error)
4897 {
4898         struct mlx5_priv *priv = dev->data->dev_private;
4899         struct mlx5_dev_config *config = &priv->config;
4900         const struct rte_flow_action *qrss = NULL;
4901         struct rte_flow_action *ext_actions = NULL;
4902         struct mlx5_flow *dev_flow = NULL;
4903         uint32_t qrss_id = 0;
4904         int mtr_sfx = 0;
4905         size_t act_size;
4906         int actions_n;
4907         int encap_idx;
4908         int ret;
4909
4910         /* Check whether extensive metadata feature is engaged. */
4911         if (!config->dv_flow_en ||
4912             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
4913             !mlx5_flow_ext_mreg_supported(dev))
4914                 return flow_create_split_inner(dev, flow, NULL, prefix_layers,
4915                                                prefix_mark, attr, items,
4916                                                actions, external, flow_idx,
4917                                                error);
4918         actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
4919                                                            &encap_idx);
4920         if (qrss) {
4921                 /* Exclude hairpin flows from splitting. */
4922                 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
4923                         const struct rte_flow_action_queue *queue;
4924
4925                         queue = qrss->conf;
4926                         if (mlx5_rxq_get_type(dev, queue->index) ==
4927                             MLX5_RXQ_TYPE_HAIRPIN)
4928                                 qrss = NULL;
4929                 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
4930                         const struct rte_flow_action_rss *rss;
4931
4932                         rss = qrss->conf;
4933                         if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
4934                             MLX5_RXQ_TYPE_HAIRPIN)
4935                                 qrss = NULL;
4936                 }
4937         }
4938         if (qrss) {
4939                 /* Check if it is in meter suffix table. */
4940                 mtr_sfx = attr->group == (attr->transfer ?
4941                           (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4942                           MLX5_FLOW_TABLE_LEVEL_SUFFIX);
4943                 /*
4944                  * Q/RSS action on NIC Rx should be split in order to pass by
4945                  * the mreg copy table (RX_CP_TBL) and then it jumps to the
4946                  * action table (RX_ACT_TBL) which has the split Q/RSS action.
4947                  */
4948                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4949                            sizeof(struct rte_flow_action_set_tag) +
4950                            sizeof(struct rte_flow_action_jump);
4951                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4952                                           SOCKET_ID_ANY);
4953                 if (!ext_actions)
4954                         return rte_flow_error_set(error, ENOMEM,
4955                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4956                                                   NULL, "no memory to split "
4957                                                   "metadata flow");
4958                 /*
4959                  * If we are the suffix flow of a meter, the tag already exists.
4960                  * Set the tag action to void.
4961                  */
4962                 if (mtr_sfx)
4963                         ext_actions[qrss - actions].type =
4964                                                 RTE_FLOW_ACTION_TYPE_VOID;
4965                 else
4966                         ext_actions[qrss - actions].type =
4967                                                 (enum rte_flow_action_type)
4968                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
4969                 /*
4970                  * Create the new actions list with the Q/RSS action removed
4971                  * and a set tag plus a jump to the register copy table
4972                  * (RX_CP_TBL) appended. The unique tag ID must be preallocated
4973                  * here, because it is needed for the set tag action.
4974                  */
4975                 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
4976                                                     qrss, actions_n, error);
4977                 if (!mtr_sfx && !qrss_id) {
4978                         ret = -rte_errno;
4979                         goto exit;
4980                 }
4981         } else if (attr->egress && !attr->transfer) {
4982                 /*
4983                  * All the actions on NIC Tx should have a metadata register
4984                  * copy action to copy reg_a from WQE to reg_c[meta].
4985                  */
4986                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
4987                            sizeof(struct mlx5_flow_action_copy_mreg);
4988                 ext_actions = mlx5_malloc(MLX5_MEM_ZERO, act_size, 0,
4989                                           SOCKET_ID_ANY);
4990                 if (!ext_actions)
4991                         return rte_flow_error_set(error, ENOMEM,
4992                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4993                                                   NULL, "no memory to split "
4994                                                   "metadata flow");
4995                 /* Create the action list appended with copy register. */
4996                 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
4997                                              actions_n, error, encap_idx);
4998                 if (ret < 0)
4999                         goto exit;
5000         }
5001         /* Add the unmodified original or prefix subflow. */
5002         ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers,
5003                                       prefix_mark, attr,
5004                                       items, ext_actions ? ext_actions :
5005                                       actions, external, flow_idx, error);
5006         if (ret < 0)
5007                 goto exit;
5008         MLX5_ASSERT(dev_flow);
5009         if (qrss) {
5010                 const struct rte_flow_attr q_attr = {
5011                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5012                         .ingress = 1,
5013                 };
5014                 /* Internal PMD action to set register. */
5015                 struct mlx5_rte_flow_item_tag q_tag_spec = {
5016                         .data = qrss_id,
5017                         .id = REG_NON,
5018                 };
5019                 struct rte_flow_item q_items[] = {
5020                         {
5021                                 .type = (enum rte_flow_item_type)
5022                                         MLX5_RTE_FLOW_ITEM_TYPE_TAG,
5023                                 .spec = &q_tag_spec,
5024                                 .last = NULL,
5025                                 .mask = NULL,
5026                         },
5027                         {
5028                                 .type = RTE_FLOW_ITEM_TYPE_END,
5029                         },
5030                 };
5031                 struct rte_flow_action q_actions[] = {
5032                         {
5033                                 .type = qrss->type,
5034                                 .conf = qrss->conf,
5035                         },
5036                         {
5037                                 .type = RTE_FLOW_ACTION_TYPE_END,
5038                         },
5039                 };
5040                 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
5041
5042                 /*
5043                  * Configure the tag item only if there is no meter subflow.
5044                  * Since the tag is already set in the meter suffix subflow,
5045                  * we can just use the meter suffix items as is.
5046                  */
5047                 if (qrss_id) {
5048                         /* Not meter subflow. */
5049                         MLX5_ASSERT(!mtr_sfx);
5050                         /*
5051                          * Put the unique id in the prefix flow because it is
5052                          * destroyed after the suffix flow; the id is freed
5053                          * once no actual flow uses it, making identifier
5054                          * reallocation possible (for example, for other
5055                          * flows in other threads).
5056                          */
5057                         dev_flow->handle->split_flow_id = qrss_id;
5058                         ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
5059                                                    error);
5060                         if (ret < 0)
5061                                 goto exit;
5062                         q_tag_spec.id = ret;
5063                 }
5064                 dev_flow = NULL;
5065                 /* Add suffix subflow to execute Q/RSS. */
5066                 ret = flow_create_split_inner(dev, flow, &dev_flow, layers, 0,
5067                                               &q_attr, mtr_sfx ? items :
5068                                               q_items, q_actions,
5069                                               external, flow_idx, error);
5070                 if (ret < 0)
5071                         goto exit;
5072                 /* Clear qrss_id: it must be freed only on failure. */
5073                 qrss_id = 0;
5074                 MLX5_ASSERT(dev_flow);
5075         }
5076
5077 exit:
5078         /*
5079          * We do not destroy the partially created sub_flows in case of error.
5080          * They are included in the parent flow list and will be destroyed
5081          * by flow_drv_destroy().
5082          */
5083         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RSS_EXPANTION_FLOW_ID],
5084                         qrss_id);
5085         mlx5_free(ext_actions);
5086         return ret;
5087 }
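
/*
 * Illustrative sketch, kept out of the build: a MARK + QUEUE rule like
 * the one below is the kind of rule the metadata splitter above rewrites
 * when extensive metadata support is enabled. The port id, mark id and
 * queue index are example assumptions.
 */
#if 0
static void
example_mark_queue_rule(uint16_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_mark mark = { .id = 0xbef };
	const struct rte_flow_action_queue queue = { .index = 3 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/*
	 * The PMD may split this into a prefix flow (MARK + SET_TAG + JUMP
	 * to the register copy table) and a suffix flow matching the tag
	 * and executing the QUEUE action.
	 */
	(void)rte_flow_create(port_id, &attr, pattern, actions, &error);
}
#endif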
5088
5089 /**
5090  * The splitting for meter feature.
5091  *
5092  * - The meter flow is split into two flows: a prefix and a
5093  *   suffix flow. Packets pass to the suffix flow only if they
5094  *   pass the prefix meter action.
5095  *
5096  * - Reg_C_5 is used to match a packet between the prefix and
5097  *   suffix flows.
5098  *
5099  * @param dev
5100  *   Pointer to Ethernet device.
5101  * @param[in] flow
5102  *   Parent flow structure pointer.
5103  * @param[in] prefix_layers
5104  *   Prefix subflow layers, may be 0.
5105  * @param[in] prefix_mark
5106  *   Prefix subflow mark flag, may be 0.
5107  * @param[in] attr
5108  *   Flow rule attributes.
5109  * @param[in] items
5110  *   Pattern specification (list terminated by the END pattern item).
5111  * @param[in] actions
5112  *   Associated actions (list terminated by the END action).
5113  * @param[in] external
5114  *   This flow rule is created by a request external to the PMD.
5115  * @param[in] flow_idx
5116  *   Memory pool index of the flow.
5117  * @param[out] error
5118  *   Perform verbose error reporting if not NULL.
5119  * @return
5120  *   0 on success, negative value otherwise
5121  */
5122 static int
5123 flow_create_split_meter(struct rte_eth_dev *dev,
5124                         struct rte_flow *flow,
5125                         uint64_t prefix_layers,
5126                         uint32_t prefix_mark,
5127                         const struct rte_flow_attr *attr,
5128                         const struct rte_flow_item items[],
5129                         const struct rte_flow_action actions[],
5130                         bool external, uint32_t flow_idx,
5131                         struct rte_flow_error *error)
5132 {
5133         struct mlx5_priv *priv = dev->data->dev_private;
5134         struct rte_flow_action *sfx_actions = NULL;
5135         struct rte_flow_action *pre_actions = NULL;
5136         struct rte_flow_item *sfx_items = NULL;
5137         struct mlx5_flow *dev_flow = NULL;
5138         struct rte_flow_attr sfx_attr = *attr;
5139         uint32_t mtr = 0;
5140         uint32_t mtr_tag_id = 0;
5141         size_t act_size;
5142         size_t item_size;
5143         int actions_n = 0;
5144         int ret;
5145
5146         if (priv->mtr_en)
5147                 actions_n = flow_check_meter_action(actions, &mtr);
5148         if (mtr) {
5149                 /* The five prefix actions: meter, decap, encap, tag, end. */
5150                 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
5151                            sizeof(struct mlx5_rte_flow_action_set_tag);
5152                 /* tag, vlan, port id, end. */
5153 #define METER_SUFFIX_ITEM 4
5154                 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
5155                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5156                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size + item_size),
5157                                           0, SOCKET_ID_ANY);
5158                 if (!sfx_actions)
5159                         return rte_flow_error_set(error, ENOMEM,
5160                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5161                                                   NULL, "no memory to split "
5162                                                   "meter flow");
5163                 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
5164                              act_size);
5165                 pre_actions = sfx_actions + actions_n;
5166                 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
5167                                                    actions, sfx_actions,
5168                                                    pre_actions);
5169                 if (!mtr_tag_id) {
5170                         ret = -rte_errno;
5171                         goto exit;
5172                 }
5173                 /* Add the prefix subflow. */
5174                 ret = flow_create_split_inner(dev, flow, &dev_flow,
5175                                               prefix_layers, 0,
5176                                               attr, items,
5177                                               pre_actions, external,
5178                                               flow_idx, error);
5179                 if (ret) {
5180                         ret = -rte_errno;
5181                         goto exit;
5182                 }
5183                 dev_flow->handle->split_flow_id = mtr_tag_id;
5184                 /* Set the sfx group attr. */
5185                 sfx_attr.group = sfx_attr.transfer ?
5186                                 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
5187                                  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
5188         }
5189         /* Add the suffix subflow. */
5190         ret = flow_create_split_metadata(dev, flow, dev_flow ?
5191                                          flow_get_prefix_layer_flags(dev_flow) :
5192                                          prefix_layers, dev_flow ?
5193                                          dev_flow->handle->mark : prefix_mark,
5194                                          &sfx_attr, sfx_items ?
5195                                          sfx_items : items,
5196                                          sfx_actions ? sfx_actions : actions,
5197                                          external, flow_idx, error);
5198 exit:
5199         if (sfx_actions)
5200                 mlx5_free(sfx_actions);
5201         return ret;
5202 }
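
/*
 * Illustrative sketch, kept out of the build: a rule with a METER action
 * that takes the meter split path above. It assumes a meter with id 1 was
 * created beforehand through the rte_mtr API; the port id and queue index
 * are example values.
 */
#if 0
static void
example_meter_rule(uint16_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_meter meter = { .mtr_id = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &meter },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	/* The prefix subflow meters; the suffix subflow queues. */
	(void)rte_flow_create(port_id, &attr, pattern, actions, &error);
}
#endif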
5203
5204 /**
5205  * The splitting for sample feature.
5206  *
5207  * Once a Sample action is detected in the action list, the flow actions are
5208  * split into a prefix sub flow and a suffix sub flow.
5209  *
5210  * The original items remain in the prefix sub flow. All actions preceding
5211  * the sample action, and the sample action itself, are copied to the prefix
5212  * sub flow; the actions following the sample action are copied to the
5213  * suffix sub flow, so a Queue action is always located in the suffix sub flow.
5214  *
5215  * In order to match packets from the prefix sub flow in the suffix sub
5216  * flow, an extra tag action is added to the prefix sub flow, and the suffix
5217  * sub flow uses a tag item with the unique flow id.
5218  *
5219  * @param dev
5220  *   Pointer to Ethernet device.
5221  * @param[in] flow
5222  *   Parent flow structure pointer.
5223  * @param[in] attr
5224  *   Flow rule attributes.
5225  * @param[in] items
5226  *   Pattern specification (list terminated by the END pattern item).
5227  * @param[in] actions
5228  *   Associated actions (list terminated by the END action).
5229  * @param[in] external
5230  *   This flow rule is created by a request external to the PMD.
5231  * @param[in] flow_idx
5232  *   Memory pool index of the flow.
5233  * @param[out] error
5234  *   Perform verbose error reporting if not NULL.
5235  * @return
5236  *   0 on success, negative value otherwise
5237  */
5238 static int
5239 flow_create_split_sample(struct rte_eth_dev *dev,
5240                          struct rte_flow *flow,
5241                          const struct rte_flow_attr *attr,
5242                          const struct rte_flow_item items[],
5243                          const struct rte_flow_action actions[],
5244                          bool external, uint32_t flow_idx,
5245                          struct rte_flow_error *error)
5246 {
5247         struct mlx5_priv *priv = dev->data->dev_private;
5248         struct rte_flow_action *sfx_actions = NULL;
5249         struct rte_flow_action *pre_actions = NULL;
5250         struct rte_flow_item *sfx_items = NULL;
5251         struct mlx5_flow *dev_flow = NULL;
5252         struct rte_flow_attr sfx_attr = *attr;
5253 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5254         struct mlx5_flow_dv_sample_resource *sample_res;
5255         struct mlx5_flow_tbl_data_entry *sfx_tbl_data;
5256         struct mlx5_flow_tbl_resource *sfx_tbl;
5257         union mlx5_flow_tbl_key sfx_table_key;
5258 #endif
5259         size_t act_size;
5260         size_t item_size;
5261         uint32_t fdb_tx = 0;
5262         int32_t tag_id = 0;
5263         int actions_n = 0;
5264         int sample_action_pos;
5265         int qrss_action_pos;
5266         int ret = 0;
5267
5268         if (priv->sampler_en)
5269                 actions_n = flow_check_match_action(actions, attr,
5270                                         RTE_FLOW_ACTION_TYPE_SAMPLE,
5271                                         &sample_action_pos, &qrss_action_pos);
5272         if (actions_n) {
5273                 /* The prefix actions must include sample, tag, end. */
5274                 act_size = sizeof(struct rte_flow_action) * (actions_n * 2 + 1)
5275                            + sizeof(struct mlx5_rte_flow_action_set_tag);
5276                 item_size = sizeof(struct rte_flow_item) * SAMPLE_SUFFIX_ITEM +
5277                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
5278                 sfx_actions = mlx5_malloc(MLX5_MEM_ZERO, (act_size +
5279                                           item_size), 0, SOCKET_ID_ANY);
5280                 if (!sfx_actions)
5281                         return rte_flow_error_set(error, ENOMEM,
5282                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5283                                                   NULL, "no memory to split "
5284                                                   "sample flow");
5285                 /* The representor_id is -1 for uplink. */
5286                 fdb_tx = (attr->transfer && priv->representor_id != -1);
5287                 if (!fdb_tx)
5288                         sfx_items = (struct rte_flow_item *)((char *)sfx_actions
5289                                         + act_size);
5290                 pre_actions = sfx_actions + actions_n;
5291                 tag_id = flow_sample_split_prep(dev, fdb_tx, sfx_items,
5292                                                 actions, sfx_actions,
5293                                                 pre_actions, actions_n,
5294                                                 sample_action_pos,
5295                                                 qrss_action_pos, error);
5296                 if (tag_id < 0 || (!fdb_tx && !tag_id)) {
5297                         ret = -rte_errno;
5298                         goto exit;
5299                 }
5300                 /* Add the prefix subflow. */
5301                 ret = flow_create_split_inner(dev, flow, &dev_flow, 0, 0, attr,
5302                                               items, pre_actions, external,
5303                                               flow_idx, error);
5304                 if (ret) {
5305                         ret = -rte_errno;
5306                         goto exit;
5307                 }
5308                 dev_flow->handle->split_flow_id = tag_id;
5309 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
5310                 /* Set the sfx group attr. */
5311                 sample_res = (struct mlx5_flow_dv_sample_resource *)
5312                                         dev_flow->dv.sample_res;
5313                 sfx_tbl = (struct mlx5_flow_tbl_resource *)
5314                                         sample_res->normal_path_tbl;
5315                 sfx_tbl_data = container_of(sfx_tbl,
5316                                         struct mlx5_flow_tbl_data_entry, tbl);
5317                 sfx_table_key.v64 = sfx_tbl_data->entry.key;
5318                 sfx_attr.group = sfx_attr.transfer ?
5319                                         (sfx_table_key.table_id - 1) :
5320                                          sfx_table_key.table_id;
5321 #endif
5322         }
5323         /* Add the suffix subflow. */
5324         ret = flow_create_split_meter(dev, flow, dev_flow ?
5325                                  flow_get_prefix_layer_flags(dev_flow) : 0,
5326                                  dev_flow ? dev_flow->handle->mark : 0,
5327                                  &sfx_attr, sfx_items ? sfx_items : items,
5328                                  sfx_actions ? sfx_actions : actions,
5329                                  external, flow_idx, error);
5330 exit:
5331         if (sfx_actions)
5332                 mlx5_free(sfx_actions);
5333         return ret;
5334 }
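
/*
 * Illustrative sketch, kept out of the build: a rule with a SAMPLE action
 * that takes the sample split path above. Everything up to and including
 * SAMPLE lands in the prefix subflow; the trailing QUEUE action lands in
 * the suffix subflow. Ratio, port id and queue indexes are example values.
 */
#if 0
static void
example_sample_rule(uint16_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue mirror_queue = { .index = 1 };
	const struct rte_flow_action sample_sub_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &mirror_queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* Sample every second packet (ratio 2) into queue 1. */
	const struct rte_flow_action_sample sample = {
		.ratio = 2,
		.actions = sample_sub_actions,
	};
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_SAMPLE, .conf = &sample },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	(void)rte_flow_create(port_id, &attr, pattern, actions, &error);
}
#endif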
5335
5336 /**
5337  * Split the flow into a set of subflows. The splitters might be chained,
5338  * like this:
5339  * flow_create_split_outer() calls:
5340  *   flow_create_split_meter() calls:
5341  *     flow_create_split_metadata(meter_subflow_0) calls:
5342  *       flow_create_split_inner(metadata_subflow_0)
5343  *       flow_create_split_inner(metadata_subflow_1)
5344  *       flow_create_split_inner(metadata_subflow_2)
5345  *     flow_create_split_metadata(meter_subflow_1) calls:
5346  *       flow_create_split_inner(metadata_subflow_0)
5347  *       flow_create_split_inner(metadata_subflow_1)
5348  *       flow_create_split_inner(metadata_subflow_2)
5349  *
5350  * This provides a flexible way to add new levels of flow splitting.
5351  * All successfully created subflows are included in the parent flow
5352  * dev_flow list.
5353  *
5354  * @param dev
5355  *   Pointer to Ethernet device.
5356  * @param[in] flow
5357  *   Parent flow structure pointer.
5358  * @param[in] attr
5359  *   Flow rule attributes.
5360  * @param[in] items
5361  *   Pattern specification (list terminated by the END pattern item).
5362  * @param[in] actions
5363  *   Associated actions (list terminated by the END action).
5364  * @param[in] external
5365  *   This flow rule is created by a request external to the PMD.
5366  * @param[in] flow_idx
5367  *   Memory pool index of the flow.
5368  * @param[out] error
5369  *   Perform verbose error reporting if not NULL.
5370  * @return
5371  *   0 on success, negative value otherwise
5372  */
5373 static int
5374 flow_create_split_outer(struct rte_eth_dev *dev,
5375                         struct rte_flow *flow,
5376                         const struct rte_flow_attr *attr,
5377                         const struct rte_flow_item items[],
5378                         const struct rte_flow_action actions[],
5379                         bool external, uint32_t flow_idx,
5380                         struct rte_flow_error *error)
5381 {
5382         int ret;
5383
5384         ret = flow_create_split_sample(dev, flow, attr, items,
5385                                        actions, external, flow_idx, error);
5386         MLX5_ASSERT(ret <= 0);
5387         return ret;
5388 }
5389
5390 static struct mlx5_flow_tunnel *
5391 flow_tunnel_from_rule(struct rte_eth_dev *dev,
5392                       const struct rte_flow_attr *attr,
5393                       const struct rte_flow_item items[],
5394                       const struct rte_flow_action actions[])
5395 {
5396         struct mlx5_flow_tunnel *tunnel;
5397
5398 #pragma GCC diagnostic push
5399 #pragma GCC diagnostic ignored "-Wcast-qual"
5400         if (is_flow_tunnel_match_rule(dev, attr, items, actions))
5401                 tunnel = (struct mlx5_flow_tunnel *)items[0].spec;
5402         else if (is_flow_tunnel_steer_rule(dev, attr, items, actions))
5403                 tunnel = (struct mlx5_flow_tunnel *)actions[0].conf;
5404         else
5405                 tunnel = NULL;
5406 #pragma GCC diagnostic pop
5407
5408         return tunnel;
5409 }
5410
5411 /**
5412  * Adjust flow RSS workspace if needed.
5413  *
5414  * @param wks
5415  *   Pointer to thread flow work space.
5416  * @param rss_desc
5417  *   Pointer to RSS descriptor.
5418  * @param[in] nrssq_num
5419  *   New RSS queue number.
5420  *
5421  * @return
5422  *   0 on success, -1 otherwise and rte_errno is set.
5423  */
5424 static int
5425 flow_rss_workspace_adjust(struct mlx5_flow_workspace *wks,
5426                           struct mlx5_flow_rss_desc *rss_desc,
5427                           uint32_t nrssq_num)
5428 {
5429         bool fidx = !!wks->flow_idx;
5430
5431         if (likely(nrssq_num <= wks->rssq_num[fidx]))
5432                 return 0;
5433         rss_desc->queue = realloc(rss_desc->queue,
5434                           sizeof(rss_desc->queue[0]) * RTE_ALIGN(nrssq_num, 2));
5435         if (!rss_desc->queue) {
5436                 rte_errno = ENOMEM;
5437                 return -1;
5438         }
5439         wks->rssq_num[fidx] = RTE_ALIGN(nrssq_num, 2);
5440         return 0;
5441 }
5442
5443 /**
5444  * Create a flow and add it to @p list.
5445  *
5446  * @param dev
5447  *   Pointer to Ethernet device.
5448  * @param list
5449  *   Pointer to a TAILQ flow list. If this parameter is NULL,
5450  *   no list insertion occurs, the flow is just created,
5451  *   and it is the caller's responsibility to track the
5452  *   created flow.
5453  * @param[in] attr
5454  *   Flow rule attributes.
5455  * @param[in] items
5456  *   Pattern specification (list terminated by the END pattern item).
5457  * @param[in] actions
5458  *   Associated actions (list terminated by the END action).
5459  * @param[in] external
5460  *   This flow rule is created by a request external to the PMD.
5461  * @param[out] error
5462  *   Perform verbose error reporting if not NULL.
5463  *
5464  * @return
5465  *   A flow index on success, 0 otherwise and rte_errno is set.
5466  */
5467 static uint32_t
5468 flow_list_create(struct rte_eth_dev *dev, uint32_t *list,
5469                  const struct rte_flow_attr *attr,
5470                  const struct rte_flow_item items[],
5471                  const struct rte_flow_action original_actions[],
5472                  bool external, struct rte_flow_error *error)
5473 {
5474         struct mlx5_priv *priv = dev->data->dev_private;
5475         struct rte_flow *flow = NULL;
5476         struct mlx5_flow *dev_flow;
5477         const struct rte_flow_action_rss *rss;
5478         struct mlx5_translated_shared_action
5479                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5480         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5481         union {
5482                 struct mlx5_flow_expand_rss buf;
5483                 uint8_t buffer[2048];
5484         } expand_buffer;
5485         union {
5486                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5487                 uint8_t buffer[2048];
5488         } actions_rx;
5489         union {
5490                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
5491                 uint8_t buffer[2048];
5492         } actions_hairpin_tx;
5493         union {
5494                 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
5495                 uint8_t buffer[2048];
5496         } items_tx;
5497         struct mlx5_flow_expand_rss *buf = &expand_buffer.buf;
5498         struct mlx5_flow_rss_desc *rss_desc;
5499         const struct rte_flow_action *p_actions_rx;
5500         uint32_t i;
5501         uint32_t idx = 0;
5502         int hairpin_flow;
5503         struct rte_flow_attr attr_tx = { .priority = 0 };
5504         struct rte_flow_attr attr_factor = {0};
5505         const struct rte_flow_action *actions;
5506         struct rte_flow_action *translated_actions = NULL;
5507         struct mlx5_flow_tunnel *tunnel;
5508         struct tunnel_default_miss_ctx default_miss_ctx = { 0, };
5509         struct mlx5_flow_workspace *wks = mlx5_flow_get_thread_workspace();
5510         bool fidx = !!wks->flow_idx;
5511         int ret;
5512
5513         MLX5_ASSERT(wks);
5514         rss_desc = &wks->rss_desc[fidx];
5515         ret = flow_shared_actions_translate(original_actions,
5516                                             shared_actions,
5517                                             &shared_actions_n,
5518                                             &translated_actions, error);
5519         if (ret < 0) {
5520                 MLX5_ASSERT(translated_actions == NULL);
5521                 return 0;
5522         }
5523         actions = translated_actions ? translated_actions : original_actions;
5524         memcpy((void *)&attr_factor, (const void *)attr, sizeof(*attr));
5525         p_actions_rx = actions;
5526         hairpin_flow = flow_check_hairpin_split(dev, &attr_factor, actions);
5527         ret = flow_drv_validate(dev, &attr_factor, items, p_actions_rx,
5528                                 external, hairpin_flow, error);
5529         if (ret < 0)
5530                 goto error_before_hairpin_split;
5531         flow = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], &idx);
5532         if (!flow) {
5533                 rte_errno = ENOMEM;
5534                 goto error_before_hairpin_split;
5535         }
5536         if (hairpin_flow > 0) {
5537                 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
5538                         rte_errno = EINVAL;
5539                         goto error_before_hairpin_split;
5540                 }
5541                 flow_hairpin_split(dev, actions, actions_rx.actions,
5542                                    actions_hairpin_tx.actions, items_tx.items,
5543                                    idx);
5544                 p_actions_rx = actions_rx.actions;
5545         }
5546         flow->drv_type = flow_get_drv_type(dev, &attr_factor);
5547         MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
5548                     flow->drv_type < MLX5_FLOW_TYPE_MAX);
5549         memset(rss_desc, 0, offsetof(struct mlx5_flow_rss_desc, queue));
5550         rss = flow_get_rss_action(p_actions_rx);
5551         if (rss) {
5552                 if (flow_rss_workspace_adjust(wks, rss_desc, rss->queue_num))
5553                         return 0;
5554                 /*
5555                  * The following information is required by
5556                  * mlx5_flow_hashfields_adjust() in advance.
5557                  */
5558                 rss_desc->level = rss->level;
5559                 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
5560                 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
5561         }
5562         flow->dev_handles = 0;
5563         if (rss && rss->types) {
5564                 unsigned int graph_root;
5565
5566                 graph_root = find_graph_root(items, rss->level);
5567                 ret = mlx5_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
5568                                            items, rss->types,
5569                                            mlx5_support_expansion, graph_root);
5570                 MLX5_ASSERT(ret > 0 &&
5571                        (unsigned int)ret < sizeof(expand_buffer.buffer));
5572         } else {
5573                 buf->entries = 1;
5574                 buf->entry[0].pattern = (void *)(uintptr_t)items;
5575         }
5576         flow->shared_rss = flow_get_shared_rss_action(shared_actions,
5577                                                       shared_actions_n);
5578         /*
5579          * Record the start index when there is a nested call. All sub-flows
5580          * need to be translated before another call is made.
5581          * A ping-pong buffer is not needed here, which saves memory.
5582          */
5583         if (fidx) {
5584                 MLX5_ASSERT(!wks->flow_nested_idx);
5585                 wks->flow_nested_idx = fidx;
5586         }
5587         for (i = 0; i < buf->entries; ++i) {
5588                 /*
5589                  * The splitter may create multiple dev_flows,
5590                  * depending on configuration. In the simplest
5591                  * case it just creates unmodified original flow.
5592                  */
5593                 ret = flow_create_split_outer(dev, flow, &attr_factor,
5594                                               buf->entry[i].pattern,
5595                                               p_actions_rx, external, idx,
5596                                               error);
5597                 if (ret < 0)
5598                         goto error;
5599                 if (is_flow_tunnel_steer_rule(dev, attr,
5600                                               buf->entry[i].pattern,
5601                                               p_actions_rx)) {
5602                         ret = flow_tunnel_add_default_miss(dev, flow, attr,
5603                                                            p_actions_rx,
5604                                                            idx,
5605                                                            &default_miss_ctx,
5606                                                            error);
5607                         if (ret < 0) {
5608                                 mlx5_free(default_miss_ctx.queue);
5609                                 goto error;
5610                         }
5611                 }
5612         }
5613         /* Create the tx flow. */
5614         if (hairpin_flow) {
5615                 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
5616                 attr_tx.ingress = 0;
5617                 attr_tx.egress = 1;
5618                 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
5619                                          actions_hairpin_tx.actions,
5620                                          idx, error);
5621                 if (!dev_flow)
5622                         goto error;
5623                 dev_flow->flow = flow;
5624                 dev_flow->external = 0;
5625                 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
5626                               dev_flow->handle, next);
5627                 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
5628                                          items_tx.items,
5629                                          actions_hairpin_tx.actions, error);
5630                 if (ret < 0)
5631                         goto error;
5632         }
5633         /*
5634          * Update the metadata register copy table. If the extensive
5635          * metadata feature is enabled and registers are supported,
5636          * we might create an extra rte_flow for each unique
5637          * MARK/FLAG action ID.
5638          *
5639          * The table is updated for ingress flows only, because
5640          * egress flows belong to a different device and the
5641          * copy table should be updated in the peer NIC Rx domain.
5642          */
5643         if (attr_factor.ingress &&
5644             (external || attr_factor.group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
5645                 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
5646                 if (ret)
5647                         goto error;
5648         }
5649         /*
5650          * If the flow is external (from the application) OR the device is
5651          * started, the flow will be applied immediately.
5652          */
5653         if (external || dev->data->dev_started) {
5654                 ret = flow_drv_apply(dev, flow, error);
5655                 if (ret < 0)
5656                         goto error;
5657         }
5658         if (list) {
5659                 rte_spinlock_lock(&priv->flow_list_lock);
5660                 ILIST_INSERT(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list, idx,
5661                              flow, next);
5662                 rte_spinlock_unlock(&priv->flow_list_lock);
5663         }
5664         flow_rxq_flags_set(dev, flow);
5665         rte_free(translated_actions);
5666         /* Nested flow creation index recovery. */
5667         wks->flow_idx = wks->flow_nested_idx;
5668         if (wks->flow_nested_idx)
5669                 wks->flow_nested_idx = 0;
5670         tunnel = flow_tunnel_from_rule(dev, attr, items, actions);
5671         if (tunnel) {
5672                 flow->tunnel = 1;
5673                 flow->tunnel_id = tunnel->tunnel_id;
5674                 __atomic_add_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED);
5675                 mlx5_free(default_miss_ctx.queue);
5676         }
5677         return idx;
5678 error:
5679         MLX5_ASSERT(flow);
5680         ret = rte_errno; /* Save rte_errno before cleanup. */
5681         flow_mreg_del_copy_action(dev, flow);
5682         flow_drv_destroy(dev, flow);
5683         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], idx);
5684         rte_errno = ret; /* Restore rte_errno. */
5687         wks->flow_idx = wks->flow_nested_idx;
5688         if (wks->flow_nested_idx)
5689                 wks->flow_nested_idx = 0;
5690 error_before_hairpin_split:
5691         rte_free(translated_actions);
5692         return 0;
5693 }
5694
5695 /**
5696  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
5697  * incoming packets to table 1.
5698  *
5699  * Other flow rules, requested for group n, will be created in
5700  * e-switch table n+1.
5701  * A jump action to e-switch group n will be created as a jump to group n+1.
5702  *
5703  * Used when working in switchdev mode, to utilise advantages of table 1
5704  * and above.
5705  *
5706  * @param dev
5707  *   Pointer to Ethernet device.
5708  *
5709  * @return
5710  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
5711  */
5712 struct rte_flow *
5713 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
5714 {
5715         const struct rte_flow_attr attr = {
5716                 .group = 0,
5717                 .priority = 0,
5718                 .ingress = 1,
5719                 .egress = 0,
5720                 .transfer = 1,
5721         };
5722         const struct rte_flow_item pattern = {
5723                 .type = RTE_FLOW_ITEM_TYPE_END,
5724         };
5725         struct rte_flow_action_jump jump = {
5726                 .group = 1,
5727         };
5728         const struct rte_flow_action actions[] = {
5729                 {
5730                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
5731                         .conf = &jump,
5732                 },
5733                 {
5734                         .type = RTE_FLOW_ACTION_TYPE_END,
5735                 },
5736         };
5737         struct mlx5_priv *priv = dev->data->dev_private;
5738         struct rte_flow_error error;
5739
5740         return (void *)(uintptr_t)flow_list_create(dev, &priv->ctrl_flows,
5741                                                    &attr, &pattern,
5742                                                    actions, false, &error);
5743 }
5744
5745 /**
5746  * Validate a flow supported by the NIC.
5747  *
5748  * @see rte_flow_validate()
5749  * @see rte_flow_ops
5750  */
5751 int
5752 mlx5_flow_validate(struct rte_eth_dev *dev,
5753                    const struct rte_flow_attr *attr,
5754                    const struct rte_flow_item items[],
5755                    const struct rte_flow_action original_actions[],
5756                    struct rte_flow_error *error)
5757 {
5758         int hairpin_flow;
5759         struct mlx5_translated_shared_action
5760                 shared_actions[MLX5_MAX_SHARED_ACTIONS];
5761         int shared_actions_n = MLX5_MAX_SHARED_ACTIONS;
5762         const struct rte_flow_action *actions;
5763         struct rte_flow_action *translated_actions = NULL;
5764         int ret = flow_shared_actions_translate(original_actions,
5765                                                 shared_actions,
5766                                                 &shared_actions_n,
5767                                                 &translated_actions, error);
5768
5769         if (ret)
5770                 return ret;
5771         actions = translated_actions ? translated_actions : original_actions;
5772         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
5773         ret = flow_drv_validate(dev, attr, items, actions,
5774                                 true, hairpin_flow, error);
5775         rte_free(translated_actions);
5776         return ret;
5777 }
5778
5779 /**
5780  * Create a flow.
5781  *
5782  * @see rte_flow_create()
5783  * @see rte_flow_ops
5784  */
5785 struct rte_flow *
5786 mlx5_flow_create(struct rte_eth_dev *dev,
5787                  const struct rte_flow_attr *attr,
5788                  const struct rte_flow_item items[],
5789                  const struct rte_flow_action actions[],
5790                  struct rte_flow_error *error)
5791 {
5792         struct mlx5_priv *priv = dev->data->dev_private;
5793
5794         /*
5795          * If the device is not started yet, it is not allowed to create a
5796          * flow from the application. PMD default flows and traffic control
5797          * flows are not affected.
5798          */
5799         if (unlikely(!dev->data->dev_started)) {
5800                 DRV_LOG(DEBUG, "port %u is not started when "
5801                         "inserting a flow", dev->data->port_id);
5802                 rte_flow_error_set(error, ENODEV,
5803                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5804                                    NULL,
5805                                    "port not started");
5806                 return NULL;
5807         }
5808
5809         return (void *)(uintptr_t)flow_list_create(dev, &priv->flows,
5810                                   attr, items, actions, true, error);
5811 }
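
/*
 * Illustrative sketch, kept out of the build: application flows are
 * accepted only on a started port, so rte_eth_dev_start() must come
 * first. Parameters are assumed to be prepared by the caller.
 */
#if 0
static struct rte_flow *
example_create_after_start(uint16_t port_id,
			   const struct rte_flow_attr *attr,
			   const struct rte_flow_item pattern[],
			   const struct rte_flow_action actions[])
{
	struct rte_flow_error error;

	if (rte_eth_dev_start(port_id) < 0)
		return NULL;
	return rte_flow_create(port_id, attr, pattern, actions, &error);
}
#endif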
5812
5813 /**
5814  * Destroy a flow in a list.
5815  *
5816  * @param dev
5817  *   Pointer to Ethernet device.
5818  * @param list
5819  *   Pointer to the indexed flow list. If this parameter is NULL,
5820  *   there is no flow removal from the list. Note that since the
5821  *   flow is added to the indexed list, the memory the indexed
5822  *   list points to may change as the flow is destroyed.
5823  * @param[in] flow_idx
5824  *   Index of flow to destroy.
5825  */
5826 static void
5827 flow_list_destroy(struct rte_eth_dev *dev, uint32_t *list,
5828                   uint32_t flow_idx)
5829 {
5830         struct mlx5_priv *priv = dev->data->dev_private;
5831         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5832         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
5833                                                [MLX5_IPOOL_RTE_FLOW], flow_idx);
5834
5835         if (!flow)
5836                 return;
5837         /*
5838          * Update RX queue flags only if port is started, otherwise it is
5839          * already clean.
5840          */
5841         if (dev->data->dev_started)
5842                 flow_rxq_flags_trim(dev, flow);
5843         flow_drv_destroy(dev, flow);
5844         if (list) {
5845                 rte_spinlock_lock(&priv->flow_list_lock);
5846                 ILIST_REMOVE(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], list,
5847                              flow_idx, flow, next);
5848                 rte_spinlock_unlock(&priv->flow_list_lock);
5849         }
5850         flow_mreg_del_copy_action(dev, flow);
5851         if (flow->fdir) {
5852                 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
5853                         if (priv_fdir_flow->rix_flow == flow_idx)
5854                                 break;
5855                 }
5856                 if (priv_fdir_flow) {
5857                         LIST_REMOVE(priv_fdir_flow, next);
5858                         mlx5_free(priv_fdir_flow->fdir);
5859                         mlx5_free(priv_fdir_flow);
5860                 }
5861         }
5862         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
5863         if (flow->tunnel) {
5864                 struct mlx5_flow_tunnel *tunnel;
5865                 tunnel = mlx5_find_tunnel_id(dev, flow->tunnel_id);
5866                 RTE_VERIFY(tunnel);
5867                 if (!__atomic_sub_fetch(&tunnel->refctn, 1, __ATOMIC_RELAXED))
5868                         mlx5_flow_tunnel_free(dev, tunnel);
5869         }
5870 }
5871
5872 /**
5873  * Destroy all flows.
5874  *
5875  * @param dev
5876  *   Pointer to Ethernet device.
5877  * @param list
5878  *   Pointer to the Indexed flow list.
5879  * @param active
5880  *   Whether flushing is called actively.
5881  */
5882 void
5883 mlx5_flow_list_flush(struct rte_eth_dev *dev, uint32_t *list, bool active)
5884 {
5885         uint32_t num_flushed = 0;
5886
5887         while (*list) {
5888                 flow_list_destroy(dev, list, *list);
5889                 num_flushed++;
5890         }
5891         if (active) {
5892                 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
5893                         dev->data->port_id, num_flushed);
5894         }
5895 }
5896
5897 /**
5898  * Stop all default actions for flows.
5899  *
5900  * @param dev
5901  *   Pointer to Ethernet device.
5902  */
5903 void
5904 mlx5_flow_stop_default(struct rte_eth_dev *dev)
5905 {
5906         flow_mreg_del_default_copy_action(dev);
5907         flow_rxq_flags_clear(dev);
5908 }
5909
5910 /**
5911  * Start all default actions for flows.
5912  *
5913  * @param dev
5914  *   Pointer to Ethernet device.
5915  * @return
5916  *   0 on success, a negative errno value otherwise and rte_errno is set.
5917  */
5918 int
5919 mlx5_flow_start_default(struct rte_eth_dev *dev)
5920 {
5921         struct rte_flow_error error;
5922
5923         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
5924         return flow_mreg_add_default_copy_action(dev, &error);
5925 }
5926
5927 /**
5928  * Release thread specific flow workspace data.
5929  */
5930 static void
5931 flow_release_workspace(void *data)
5932 {
5933         struct mlx5_flow_workspace *wks = data;
5934
5935         if (!wks)
5936                 return;
5937         free(wks->rss_desc[0].queue);
5938         free(wks->rss_desc[1].queue);
5939         free(wks);
5940 }
5941
5942 /**
5943  * Initialize key of thread specific flow workspace data.
5944  */
5945 static void
5946 flow_alloc_workspace(void)
5947 {
5948         if (pthread_key_create(&key_workspace, flow_release_workspace))
5949                 DRV_LOG(ERR, "Can't create flow workspace data thread key.");
5950 }
5951
5952 /**
5953  * Get thread specific flow workspace.
5954  *
5955  * @return pointer to thread specific flow workspace data, NULL on error.
5956  */
5957 struct mlx5_flow_workspace*
5958 mlx5_flow_get_thread_workspace(void)
5959 {
5960         struct mlx5_flow_workspace *data;
5961
5962         if (pthread_once(&key_workspace_init, flow_alloc_workspace)) {
5963                 DRV_LOG(ERR, "Failed to init flow workspace data thread key.");
5964                 return NULL;
5965         }
5966         data = pthread_getspecific(key_workspace);
5967         if (!data) {
5968                 data = calloc(1, sizeof(*data));
5969                 if (!data) {
5970                         DRV_LOG(ERR, "Failed to allocate flow workspace "
5971                                 "memory.");
5972                         return NULL;
5973                 }
5974                 data->rss_desc[0].queue = calloc(1,
5975                                 sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5976                 if (!data->rss_desc[0].queue)
5977                         goto err;
5978                 data->rss_desc[1].queue = calloc(1,
5979                                 sizeof(uint16_t) * MLX5_RSSQ_DEFAULT_NUM);
5980                 if (!data->rss_desc[1].queue)
5981                         goto err;
5982                 data->rssq_num[0] = MLX5_RSSQ_DEFAULT_NUM;
5983                 data->rssq_num[1] = MLX5_RSSQ_DEFAULT_NUM;
5984                 if (pthread_setspecific(key_workspace, data)) {
5985                         DRV_LOG(ERR, "Failed to set flow workspace to thread.");
5986                         goto err;
5987                 }
5988         }
5989         return data;
5990 err:
5991         if (data->rss_desc[0].queue)
5992                 free(data->rss_desc[0].queue);
5993         if (data->rss_desc[1].queue)
5994                 free(data->rss_desc[1].queue);
5995         free(data);
5996         return NULL;
5997 }
5998
5999 /**
6000  * Verify the flow list is empty.
6001  *
6002  * @param dev
6003  *  Pointer to Ethernet device.
6004  *
6005  * @return the number of flows not released.
6006  */
6007 int
6008 mlx5_flow_verify(struct rte_eth_dev *dev)
6009 {
6010         struct mlx5_priv *priv = dev->data->dev_private;
6011         struct rte_flow *flow;
6012         uint32_t idx;
6013         int ret = 0;
6014
6015         ILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], priv->flows, idx,
6016                       flow, next) {
6017                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
6018                         dev->data->port_id, (void *)flow);
6019                 ++ret;
6020         }
6021         return ret;
6022 }
6023
6024 /**
6025  * Enable default hairpin egress flow.
6026  *
6027  * @param dev
6028  *   Pointer to Ethernet device.
6029  * @param queue
6030  *   The queue index.
6031  *
6032  * @return
6033  *   0 on success, a negative errno value otherwise and rte_errno is set.
6034  */
6035 int
6036 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
6037                             uint32_t queue)
6038 {
6039         struct mlx5_priv *priv = dev->data->dev_private;
6040         const struct rte_flow_attr attr = {
6041                 .egress = 1,
6042                 .priority = 0,
6043         };
6044         struct mlx5_rte_flow_item_tx_queue queue_spec = {
6045                 .queue = queue,
6046         };
6047         struct mlx5_rte_flow_item_tx_queue queue_mask = {
6048                 .queue = UINT32_MAX,
6049         };
6050         struct rte_flow_item items[] = {
6051                 {
6052                         .type = (enum rte_flow_item_type)
6053                                 MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
6054                         .spec = &queue_spec,
6055                         .last = NULL,
6056                         .mask = &queue_mask,
6057                 },
6058                 {
6059                         .type = RTE_FLOW_ITEM_TYPE_END,
6060                 },
6061         };
6062         struct rte_flow_action_jump jump = {
6063                 .group = MLX5_HAIRPIN_TX_TABLE,
6064         };
6065         struct rte_flow_action actions[2];
6066         uint32_t flow_idx;
6067         struct rte_flow_error error;
6068
6069         actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
6070         actions[0].conf = &jump;
6071         actions[1].type = RTE_FLOW_ACTION_TYPE_END;
6072         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6073                                 &attr, items, actions, false, &error);
6074         if (!flow_idx) {
6075                 DRV_LOG(DEBUG,
6076                         "Failed to create ctrl flow: rte_errno(%d),"
6077                         " type(%d), message(%s)",
6078                         rte_errno, error.type,
6079                         error.message ? error.message : " (no stated reason)");
6080                 return -rte_errno;
6081         }
6082         return 0;
6083 }
6084
6085 /**
6086  * Enable a control flow configured from the control plane.
6087  *
6088  * @param dev
6089  *   Pointer to Ethernet device.
6090  * @param eth_spec
6091  *   An Ethernet flow spec to apply.
6092  * @param eth_mask
6093  *   An Ethernet flow mask to apply.
6094  * @param vlan_spec
6095  *   A VLAN flow spec to apply.
6096  * @param vlan_mask
6097  *   A VLAN flow mask to apply.
6098  *
6099  * @return
6100  *   0 on success, a negative errno value otherwise and rte_errno is set.
6101  */
6102 int
6103 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
6104                     struct rte_flow_item_eth *eth_spec,
6105                     struct rte_flow_item_eth *eth_mask,
6106                     struct rte_flow_item_vlan *vlan_spec,
6107                     struct rte_flow_item_vlan *vlan_mask)
6108 {
6109         struct mlx5_priv *priv = dev->data->dev_private;
6110         const struct rte_flow_attr attr = {
6111                 .ingress = 1,
6112                 .priority = MLX5_FLOW_PRIO_RSVD,
6113         };
6114         struct rte_flow_item items[] = {
6115                 {
6116                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6117                         .spec = eth_spec,
6118                         .last = NULL,
6119                         .mask = eth_mask,
6120                 },
6121                 {
6122                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
6123                                               RTE_FLOW_ITEM_TYPE_END,
6124                         .spec = vlan_spec,
6125                         .last = NULL,
6126                         .mask = vlan_mask,
6127                 },
6128                 {
6129                         .type = RTE_FLOW_ITEM_TYPE_END,
6130                 },
6131         };
6132         uint16_t queue[priv->reta_idx_n];
6133         struct rte_flow_action_rss action_rss = {
6134                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
6135                 .level = 0,
6136                 .types = priv->rss_conf.rss_hf,
6137                 .key_len = priv->rss_conf.rss_key_len,
6138                 .queue_num = priv->reta_idx_n,
6139                 .key = priv->rss_conf.rss_key,
6140                 .queue = queue,
6141         };
6142         struct rte_flow_action actions[] = {
6143                 {
6144                         .type = RTE_FLOW_ACTION_TYPE_RSS,
6145                         .conf = &action_rss,
6146                 },
6147                 {
6148                         .type = RTE_FLOW_ACTION_TYPE_END,
6149                 },
6150         };
6151         uint32_t flow_idx;
6152         struct rte_flow_error error;
6153         unsigned int i;
6154
6155         if (!priv->reta_idx_n || !priv->rxqs_n) {
6156                 return 0;
6157         }
6158         if (!(dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG))
6159                 action_rss.types = 0;
6160         for (i = 0; i != priv->reta_idx_n; ++i)
6161                 queue[i] = (*priv->reta_idx)[i];
6162         flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6163                                 &attr, items, actions, false, &error);
6164         if (!flow_idx)
6165                 return -rte_errno;
6166         return 0;
6167 }
6168
6169 /**
6170  * Enable a control flow configured from the control plane.
6171  *
6172  * @param dev
6173  *   Pointer to Ethernet device.
6174  * @param eth_spec
6175  *   An Ethernet flow spec to apply.
6176  * @param eth_mask
6177  *   An Ethernet flow mask to apply.
6178  *
6179  * @return
6180  *   0 on success, a negative errno value otherwise and rte_errno is set.
6181  */
6182 int
6183 mlx5_ctrl_flow(struct rte_eth_dev *dev,
6184                struct rte_flow_item_eth *eth_spec,
6185                struct rte_flow_item_eth *eth_mask)
6186 {
6187         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
6188 }
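
/*
 * Illustrative sketch, kept out of the build, of how this helper is
 * typically used at traffic-enable time: allow reception of broadcast
 * frames by matching on the destination MAC only.
 */
#if 0
static int
example_enable_broadcast(struct rte_eth_dev *dev)
{
	struct rte_flow_item_eth bcast = {
		.dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
	};

	return mlx5_ctrl_flow(dev, &bcast, &bcast);
}
#endif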
6189
6190 /**
6191  * Create a default miss flow rule matching LACP traffic.
6192  *
6193  * @param dev
6194  *   Pointer to Ethernet device.
6195  *
6198  * @return
6199  *   0 on success, a negative errno value otherwise and rte_errno is set.
6200  */
6201 int
6202 mlx5_flow_lacp_miss(struct rte_eth_dev *dev)
6203 {
6204         struct mlx5_priv *priv = dev->data->dev_private;
6205         /*
6206          * The LACP matching is done using only the ether type, since using
6207          * a multicast dst mac causes the kernel to give this flow low priority.
6208          */
6209         static const struct rte_flow_item_eth lacp_spec = {
6210                 .type = RTE_BE16(0x8809),
6211         };
6212         static const struct rte_flow_item_eth lacp_mask = {
6213                 .type = 0xffff,
6214         };
6215         const struct rte_flow_attr attr = {
6216                 .ingress = 1,
6217         };
6218         struct rte_flow_item items[] = {
6219                 {
6220                         .type = RTE_FLOW_ITEM_TYPE_ETH,
6221                         .spec = &lacp_spec,
6222                         .mask = &lacp_mask,
6223                 },
6224                 {
6225                         .type = RTE_FLOW_ITEM_TYPE_END,
6226                 },
6227         };
6228         struct rte_flow_action actions[] = {
6229                 {
6230                         .type = (enum rte_flow_action_type)
6231                                 MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
6232                 },
6233                 {
6234                         .type = RTE_FLOW_ACTION_TYPE_END,
6235                 },
6236         };
6237         struct rte_flow_error error;
6238         uint32_t flow_idx = flow_list_create(dev, &priv->ctrl_flows,
6239                                 &attr, items, actions, false, &error);
6240
6241         if (!flow_idx)
6242                 return -rte_errno;
6243         return 0;
6244 }
6245
6246 /**
6247  * Destroy a flow.
6248  *
6249  * @see rte_flow_destroy()
6250  * @see rte_flow_ops
6251  */
6252 int
6253 mlx5_flow_destroy(struct rte_eth_dev *dev,
6254                   struct rte_flow *flow,
6255                   struct rte_flow_error *error __rte_unused)
6256 {
6257         struct mlx5_priv *priv = dev->data->dev_private;
6258
6259         flow_list_destroy(dev, &priv->flows, (uintptr_t)(void *)flow);
6260         return 0;
6261 }
6262
6263 /**
6264  * Destroy all flows.
6265  *
6266  * @see rte_flow_flush()
6267  * @see rte_flow_ops
6268  */
6269 int
6270 mlx5_flow_flush(struct rte_eth_dev *dev,
6271                 struct rte_flow_error *error __rte_unused)
6272 {
6273         struct mlx5_priv *priv = dev->data->dev_private;
6274
6275         mlx5_flow_list_flush(dev, &priv->flows, false);
6276         return 0;
6277 }
6278
6279 /**
6280  * Isolated mode.
6281  *
6282  * @see rte_flow_isolate()
6283  * @see rte_flow_ops
6284  */
6285 int
6286 mlx5_flow_isolate(struct rte_eth_dev *dev,
6287                   int enable,
6288                   struct rte_flow_error *error)
6289 {
6290         struct mlx5_priv *priv = dev->data->dev_private;
6291
6292         if (dev->data->dev_started) {
6293                 rte_flow_error_set(error, EBUSY,
6294                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6295                                    NULL,
6296                                    "port must be stopped first");
6297                 return -rte_errno;
6298         }
6299         priv->isolated = !!enable;
6300         if (enable)
6301                 dev->dev_ops = &mlx5_os_dev_ops_isolate;
6302         else
6303                 dev->dev_ops = &mlx5_os_dev_ops;
6304
6305         dev->rx_descriptor_status = mlx5_rx_descriptor_status;
6306         dev->tx_descriptor_status = mlx5_tx_descriptor_status;
6307
6308         return 0;
6309 }
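
/*
 * Illustrative sketch, kept out of the build: isolated mode can only be
 * toggled on a stopped port, so an application would stop, isolate and
 * restart. The port id is an example assumption.
 */
#if 0
static int
example_enter_isolated_mode(uint16_t port_id)
{
	struct rte_flow_error error;

	rte_eth_dev_stop(port_id);
	if (rte_flow_isolate(port_id, 1, &error) < 0)
		return -rte_errno;
	return rte_eth_dev_start(port_id);
}
#endif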
6310
6311 /**
6312  * Query a flow.
6313  *
6314  * @see rte_flow_query()
6315  * @see rte_flow_ops
6316  */
6317 static int
6318 flow_drv_query(struct rte_eth_dev *dev,
6319                uint32_t flow_idx,
6320                const struct rte_flow_action *actions,
6321                void *data,
6322                struct rte_flow_error *error)
6323 {
6324         struct mlx5_priv *priv = dev->data->dev_private;
6325         const struct mlx5_flow_driver_ops *fops;
6326         struct rte_flow *flow = mlx5_ipool_get(priv->sh->ipool
6327                                                [MLX5_IPOOL_RTE_FLOW],
6328                                                flow_idx);
6329         enum mlx5_flow_drv_type ftype;
6330
6331         if (!flow) {
6332                 return rte_flow_error_set(error, ENOENT,
6333                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
6334                           NULL,
6335                           "invalid flow handle");
6336         }
6337         ftype = flow->drv_type;
6338         MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
6339         fops = flow_get_drv_ops(ftype);
6340
6341         return fops->query(dev, flow, actions, data, error);
6342 }
6343
6344 /**
6345  * Query a flow.
6346  *
6347  * @see rte_flow_query()
6348  * @see rte_flow_ops
6349  */
6350 int
6351 mlx5_flow_query(struct rte_eth_dev *dev,
6352                 struct rte_flow *flow,
6353                 const struct rte_flow_action *actions,
6354                 void *data,
6355                 struct rte_flow_error *error)
6356 {
6357         int ret;
6358
6359         ret = flow_drv_query(dev, (uintptr_t)(void *)flow, actions, data,
6360                              error);
6361         if (ret < 0)
6362                 return ret;
6363         return 0;
6364 }
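
/*
 * Illustrative sketch, kept out of the build: querying the COUNT action
 * of a rule that was created with one. The action list passed to
 * rte_flow_query() selects which action to query.
 */
#if 0
static int
example_query_count(uint16_t port_id, struct rte_flow *flow,
		    uint64_t *hits, uint64_t *bytes)
{
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count count = { .reset = 0 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, actions, &count, &error);
	if (ret)
		return ret;
	*hits = count.hits;
	*bytes = count.bytes;
	return 0;
}
#endif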
6365
6366 /**
6367  * Convert a flow director filter to a generic flow.
6368  *
6369  * @param dev
6370  *   Pointer to Ethernet device.
6371  * @param fdir_filter
6372  *   Flow director filter to add.
6373  * @param attributes
6374  *   Generic flow parameters structure.
6375  *
6376  * @return
6377  *   0 on success, a negative errno value otherwise and rte_errno is set.
6378  */
6379 static int
6380 flow_fdir_filter_convert(struct rte_eth_dev *dev,
6381                          const struct rte_eth_fdir_filter *fdir_filter,
6382                          struct mlx5_fdir *attributes)
6383 {
6384         struct mlx5_priv *priv = dev->data->dev_private;
6385         const struct rte_eth_fdir_input *input = &fdir_filter->input;
6386         const struct rte_eth_fdir_masks *mask =
6387                 &dev->data->dev_conf.fdir_conf.mask;
6388
6389         /* Validate queue number. */
6390         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
6391                 DRV_LOG(ERR, "port %u invalid queue number %d",
6392                         dev->data->port_id, fdir_filter->action.rx_queue);
6393                 rte_errno = EINVAL;
6394                 return -rte_errno;
6395         }
6396         attributes->attr.ingress = 1;
6397         attributes->items[0] = (struct rte_flow_item) {
6398                 .type = RTE_FLOW_ITEM_TYPE_ETH,
6399                 .spec = &attributes->l2,
6400                 .mask = &attributes->l2_mask,
6401         };
6402         switch (fdir_filter->action.behavior) {
6403         case RTE_ETH_FDIR_ACCEPT:
6404                 attributes->actions[0] = (struct rte_flow_action){
6405                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
6406                         .conf = &attributes->queue,
6407                 };
6408                 break;
6409         case RTE_ETH_FDIR_REJECT:
6410                 attributes->actions[0] = (struct rte_flow_action){
6411                         .type = RTE_FLOW_ACTION_TYPE_DROP,
6412                 };
6413                 break;
6414         default:
6415                 DRV_LOG(ERR, "port %u invalid behavior %d",
6416                         dev->data->port_id,
6417                         fdir_filter->action.behavior);
6418                 rte_errno = ENOTSUP;
6419                 return -rte_errno;
6420         }
6421         attributes->queue.index = fdir_filter->action.rx_queue;
6422         /* Handle L3. */
6423         switch (fdir_filter->input.flow_type) {
6424         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6425         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6426         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6427                 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
6428                         .src_addr = input->flow.ip4_flow.src_ip,
6429                         .dst_addr = input->flow.ip4_flow.dst_ip,
6430                         .time_to_live = input->flow.ip4_flow.ttl,
6431                         .type_of_service = input->flow.ip4_flow.tos,
6432                 };
6433                 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
6434                         .src_addr = mask->ipv4_mask.src_ip,
6435                         .dst_addr = mask->ipv4_mask.dst_ip,
6436                         .time_to_live = mask->ipv4_mask.ttl,
6437                         .type_of_service = mask->ipv4_mask.tos,
6438                         .next_proto_id = mask->ipv4_mask.proto,
6439                 };
6440                 attributes->items[1] = (struct rte_flow_item){
6441                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
6442                         .spec = &attributes->l3,
6443                         .mask = &attributes->l3_mask,
6444                 };
6445                 break;
6446         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6447         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6448         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6449                 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
6450                         .hop_limits = input->flow.ipv6_flow.hop_limits,
6451                         .proto = input->flow.ipv6_flow.proto,
6452                 };
6453
6454                 memcpy(attributes->l3.ipv6.hdr.src_addr,
6455                        input->flow.ipv6_flow.src_ip,
6456                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
6457                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
6458                        input->flow.ipv6_flow.dst_ip,
6459                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
6460                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
6461                        mask->ipv6_mask.src_ip,
6462                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
6463                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
6464                        mask->ipv6_mask.dst_ip,
6465                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
6466                 attributes->items[1] = (struct rte_flow_item){
6467                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
6468                         .spec = &attributes->l3,
6469                         .mask = &attributes->l3_mask,
6470                 };
6471                 break;
6472         default:
6473                 DRV_LOG(ERR, "port %u invalid flow type %d",
6474                         dev->data->port_id, fdir_filter->input.flow_type);
6475                 rte_errno = ENOTSUP;
6476                 return -rte_errno;
6477         }
6478         /* Handle L4. */
6479         switch (fdir_filter->input.flow_type) {
6480         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
6481                 attributes->l4.udp.hdr = (struct rte_udp_hdr){
6482                         .src_port = input->flow.udp4_flow.src_port,
6483                         .dst_port = input->flow.udp4_flow.dst_port,
6484                 };
6485                 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6486                         .src_port = mask->src_port_mask,
6487                         .dst_port = mask->dst_port_mask,
6488                 };
6489                 attributes->items[2] = (struct rte_flow_item){
6490                         .type = RTE_FLOW_ITEM_TYPE_UDP,
6491                         .spec = &attributes->l4,
6492                         .mask = &attributes->l4_mask,
6493                 };
6494                 break;
6495         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
6496                 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6497                         .src_port = input->flow.tcp4_flow.src_port,
6498                         .dst_port = input->flow.tcp4_flow.dst_port,
6499                 };
6500                 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6501                         .src_port = mask->src_port_mask,
6502                         .dst_port = mask->dst_port_mask,
6503                 };
6504                 attributes->items[2] = (struct rte_flow_item){
6505                         .type = RTE_FLOW_ITEM_TYPE_TCP,
6506                         .spec = &attributes->l4,
6507                         .mask = &attributes->l4_mask,
6508                 };
6509                 break;
6510         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
6511                 attributes->l4.udp.hdr = (struct rte_udp_hdr){
6512                         .src_port = input->flow.udp6_flow.src_port,
6513                         .dst_port = input->flow.udp6_flow.dst_port,
6514                 };
6515                 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
6516                         .src_port = mask->src_port_mask,
6517                         .dst_port = mask->dst_port_mask,
6518                 };
6519                 attributes->items[2] = (struct rte_flow_item){
6520                         .type = RTE_FLOW_ITEM_TYPE_UDP,
6521                         .spec = &attributes->l4,
6522                         .mask = &attributes->l4_mask,
6523                 };
6524                 break;
6525         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
6526                 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
6527                         .src_port = input->flow.tcp6_flow.src_port,
6528                         .dst_port = input->flow.tcp6_flow.dst_port,
6529                 };
6530                 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
6531                         .src_port = mask->src_port_mask,
6532                         .dst_port = mask->dst_port_mask,
6533                 };
6534                 attributes->items[2] = (struct rte_flow_item){
6535                         .type = RTE_FLOW_ITEM_TYPE_TCP,
6536                         .spec = &attributes->l4,
6537                         .mask = &attributes->l4_mask,
6538                 };
6539                 break;
6540         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
6541         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
6542                 break;
6543         default:
6544                 DRV_LOG(ERR, "port %u invalid flow type %d",
6545                         dev->data->port_id, fdir_filter->input.flow_type);
6546                 rte_errno = ENOTSUP;
6547                 return -rte_errno;
6548         }
6549         return 0;
6550 }
6551
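/*
 * Illustrative sketch (not part of this file): a legacy flow director
 * filter that flow_fdir_filter_convert() above would translate into the
 * generic pattern ETH / IPV4 / UDP with a QUEUE action. Port values are
 * big-endian because they are copied verbatim into the rte_udp_hdr spec.
 *
 *     struct rte_eth_fdir_filter f = {
 *             .input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *             .input.flow.udp4_flow.src_port = rte_cpu_to_be_16(5000),
 *             .action.behavior = RTE_ETH_FDIR_ACCEPT,
 *             .action.rx_queue = 3,
 *     };
 */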
6552 #define FLOW_FDIR_CMP(f1, f2, fld) \
6553         memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
6554
6555 /**
6556  * Compare two FDIR flows. If items and actions are identical, the two flows
6557  * are regarded as the same.
6558  *
6559  * @param f1
6560  *   First FDIR flow to compare.
6561  * @param f2
6562  *   Second FDIR flow to compare.
6565  *
6566  * @return
6567  *   Zero on match, 1 otherwise.
6568  */
6569 static int
6570 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
6571 {
6572         if (FLOW_FDIR_CMP(f1, f2, attr) ||
6573             FLOW_FDIR_CMP(f1, f2, l2) ||
6574             FLOW_FDIR_CMP(f1, f2, l2_mask) ||
6575             FLOW_FDIR_CMP(f1, f2, l3) ||
6576             FLOW_FDIR_CMP(f1, f2, l3_mask) ||
6577             FLOW_FDIR_CMP(f1, f2, l4) ||
6578             FLOW_FDIR_CMP(f1, f2, l4_mask) ||
6579             FLOW_FDIR_CMP(f1, f2, actions[0].type))
6580                 return 1;
6581         if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
6582             FLOW_FDIR_CMP(f1, f2, queue))
6583                 return 1;
6584         return 0;
6585 }
6586
6587 /**
6588  * Search the device flow list for a matching FDIR flow.
6589  *
6590  * @param dev
6591  *   Pointer to Ethernet device.
6592  * @param fdir_flow
6593  *   FDIR flow to lookup.
6594  *
6595  * @return
6596  *   Index of flow if found, 0 otherwise.
6597  */
6598 static uint32_t
6599 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
6600 {
6601         struct mlx5_priv *priv = dev->data->dev_private;
6602         uint32_t flow_idx = 0;
6603         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6604
6605         MLX5_ASSERT(fdir_flow);
6606         LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6607                 if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
6608                         flow_idx = priv_fdir_flow->rix_flow;
6609                         DRV_LOG(DEBUG, "port %u found FDIR flow %u",
6610                                 dev->data->port_id, flow_idx);
6611                         break;
6612                 }
6613         }
6614         return flow_idx;
6615 }
6616
6617 /**
6618  * Add new flow director filter and store it in list.
6619  *
6620  * @param dev
6621  *   Pointer to Ethernet device.
6622  * @param fdir_filter
6623  *   Flow director filter to add.
6624  *
6625  * @return
6626  *   0 on success, a negative errno value otherwise and rte_errno is set.
6627  */
6628 static int
6629 flow_fdir_filter_add(struct rte_eth_dev *dev,
6630                      const struct rte_eth_fdir_filter *fdir_filter)
6631 {
6632         struct mlx5_priv *priv = dev->data->dev_private;
6633         struct mlx5_fdir *fdir_flow;
6634         struct rte_flow *flow;
6635         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6636         uint32_t flow_idx;
6637         int ret;
6638
6639         fdir_flow = mlx5_malloc(MLX5_MEM_ZERO, sizeof(*fdir_flow), 0,
6640                                 SOCKET_ID_ANY);
6641         if (!fdir_flow) {
6642                 rte_errno = ENOMEM;
6643                 return -rte_errno;
6644         }
6645         ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
6646         if (ret)
6647                 goto error;
6648         flow_idx = flow_fdir_filter_lookup(dev, fdir_flow);
6649         if (flow_idx) {
6650                 rte_errno = EEXIST;
6651                 goto error;
6652         }
6653         priv_fdir_flow = mlx5_malloc(MLX5_MEM_ZERO,
6654                                      sizeof(struct mlx5_fdir_flow),
6655                                      0, SOCKET_ID_ANY);
6656         if (!priv_fdir_flow) {
6657                 rte_errno = ENOMEM;
6658                 goto error;
6659         }
6660         flow_idx = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
6661                                     fdir_flow->items, fdir_flow->actions, true,
6662                                     NULL);
6663         flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW], flow_idx);
6664         if (!flow)
6665                 goto error;
6666         flow->fdir = 1;
6667         priv_fdir_flow->fdir = fdir_flow;
6668         priv_fdir_flow->rix_flow = flow_idx;
6669         LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
6670         DRV_LOG(DEBUG, "port %u created FDIR flow %p",
6671                 dev->data->port_id, (void *)flow);
6672         return 0;
6673 error:
6674         mlx5_free(priv_fdir_flow);
6675         mlx5_free(fdir_flow);
6676         return -rte_errno;
6677 }
6678
6679 /**
6680  * Delete specific filter.
6681  *
6682  * @param dev
6683  *   Pointer to Ethernet device.
6684  * @param fdir_filter
6685  *   Filter to be deleted.
6686  *
6687  * @return
6688  *   0 on success, a negative errno value otherwise and rte_errno is set.
6689  */
6690 static int
6691 flow_fdir_filter_delete(struct rte_eth_dev *dev,
6692                         const struct rte_eth_fdir_filter *fdir_filter)
6693 {
6694         struct mlx5_priv *priv = dev->data->dev_private;
6695         uint32_t flow_idx;
6696         struct mlx5_fdir fdir_flow = {
6697                 .attr.group = 0,
6698         };
6699         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6700         int ret;
6701
6702         ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
6703         if (ret)
6704                 return -rte_errno;
6705         LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
6706                 /* Find the FDIR flow in the private list. */
6707                 if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
6708                         break;
6709         }
6710         if (!priv_fdir_flow)
6711                 return 0;
6712         LIST_REMOVE(priv_fdir_flow, next);
6713         flow_idx = priv_fdir_flow->rix_flow;
6714         flow_list_destroy(dev, &priv->flows, flow_idx);
6715         mlx5_free(priv_fdir_flow->fdir);
6716         mlx5_free(priv_fdir_flow);
6717         DRV_LOG(DEBUG, "port %u deleted FDIR flow %u",
6718                 dev->data->port_id, flow_idx);
6719         return 0;
6720 }
6721
6722 /**
6723  * Update queue for specific filter.
6724  *
6725  * @param dev
6726  *   Pointer to Ethernet device.
6727  * @param fdir_filter
6728  *   Filter to be updated.
6729  *
6730  * @return
6731  *   0 on success, a negative errno value otherwise and rte_errno is set.
6732  */
6733 static int
6734 flow_fdir_filter_update(struct rte_eth_dev *dev,
6735                         const struct rte_eth_fdir_filter *fdir_filter)
6736 {
6737         int ret;
6738
6739         ret = flow_fdir_filter_delete(dev, fdir_filter);
6740         if (ret)
6741                 return ret;
6742         return flow_fdir_filter_add(dev, fdir_filter);
6743 }
6744
6745 /**
6746  * Flush all filters.
6747  *
6748  * @param dev
6749  *   Pointer to Ethernet device.
6750  */
6751 static void
6752 flow_fdir_filter_flush(struct rte_eth_dev *dev)
6753 {
6754         struct mlx5_priv *priv = dev->data->dev_private;
6755         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
6756
6757         while (!LIST_EMPTY(&priv->fdir_flows)) {
6758                 priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
6759                 LIST_REMOVE(priv_fdir_flow, next);
6760                 flow_list_destroy(dev, &priv->flows, priv_fdir_flow->rix_flow);
6761                 mlx5_free(priv_fdir_flow->fdir);
6762                 mlx5_free(priv_fdir_flow);
6763         }
6764 }
6765
6766 /**
6767  * Get flow director information.
6768  *
6769  * @param dev
6770  *   Pointer to Ethernet device.
6771  * @param[out] fdir_info
6772  *   Resulting flow director information.
6773  */
6774 static void
6775 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
6776 {
6777         struct rte_eth_fdir_masks *mask =
6778                 &dev->data->dev_conf.fdir_conf.mask;
6779
6780         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
6781         fdir_info->guarant_spc = 0;
6782         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
6783         fdir_info->max_flexpayload = 0;
6784         fdir_info->flow_types_mask[0] = 0;
6785         fdir_info->flex_payload_unit = 0;
6786         fdir_info->max_flex_payload_segment_num = 0;
6787         fdir_info->flex_payload_limit = 0;
6788         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
6789 }
6790
6791 /**
6792  * Deal with flow director operations.
6793  *
6794  * @param dev
6795  *   Pointer to Ethernet device.
6796  * @param filter_op
6797  *   Operation to perform.
6798  * @param arg
6799  *   Pointer to operation-specific structure.
6800  *
6801  * @return
6802  *   0 on success, a negative errno value otherwise and rte_errno is set.
6803  */
6804 static int
6805 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
6806                     void *arg)
6807 {
6808         enum rte_fdir_mode fdir_mode =
6809                 dev->data->dev_conf.fdir_conf.mode;
6810
6811         if (filter_op == RTE_ETH_FILTER_NOP)
6812                 return 0;
6813         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
6814             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
6815                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
6816                         dev->data->port_id, fdir_mode);
6817                 rte_errno = EINVAL;
6818                 return -rte_errno;
6819         }
6820         switch (filter_op) {
6821         case RTE_ETH_FILTER_ADD:
6822                 return flow_fdir_filter_add(dev, arg);
6823         case RTE_ETH_FILTER_UPDATE:
6824                 return flow_fdir_filter_update(dev, arg);
6825         case RTE_ETH_FILTER_DELETE:
6826                 return flow_fdir_filter_delete(dev, arg);
6827         case RTE_ETH_FILTER_FLUSH:
6828                 flow_fdir_filter_flush(dev);
6829                 break;
6830         case RTE_ETH_FILTER_INFO:
6831                 flow_fdir_info_get(dev, arg);
6832                 break;
6833         default:
6834                 DRV_LOG(DEBUG, "port %u unknown operation %u",
6835                         dev->data->port_id, filter_op);
6836                 rte_errno = EINVAL;
6837                 return -rte_errno;
6838         }
6839         return 0;
6840 }
6841
6842 /**
6843  * Manage filter operations.
6844  *
6845  * @param dev
6846  *   Pointer to Ethernet device structure.
6847  * @param filter_type
6848  *   Filter type.
6849  * @param filter_op
6850  *   Operation to perform.
6851  * @param arg
6852  *   Pointer to operation-specific structure.
6853  *
6854  * @return
6855  *   0 on success, a negative errno value otherwise and rte_errno is set.
6856  */
6857 int
6858 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
6859                      enum rte_filter_type filter_type,
6860                      enum rte_filter_op filter_op,
6861                      void *arg)
6862 {
6863         switch (filter_type) {
6864         case RTE_ETH_FILTER_GENERIC:
6865                 if (filter_op != RTE_ETH_FILTER_GET) {
6866                         rte_errno = EINVAL;
6867                         return -rte_errno;
6868                 }
6869                 *(const void **)arg = &mlx5_flow_ops;
6870                 return 0;
6871         case RTE_ETH_FILTER_FDIR:
6872                 return flow_fdir_ctrl_func(dev, filter_op, arg);
6873         default:
6874                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
6875                         dev->data->port_id, filter_type);
6876                 rte_errno = ENOTSUP;
6877                 return -rte_errno;
6878         }
6879         return 0;
6880 }
6881
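/*
 * Illustrative usage sketch (not part of this file): this is how the
 * rte_flow layer obtains the PMD flow ops via the legacy filter control
 * API handled by mlx5_dev_filter_ctrl() above.
 *
 *     const struct rte_flow_ops *ops = NULL;
 *     int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                                       RTE_ETH_FILTER_GET, &ops);
 *
 * On success, ops points to mlx5_flow_ops.
 */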
6882 /**
6883  * Create the needed meter and suffix tables.
6884  *
6885  * @param[in] dev
6886  *   Pointer to Ethernet device.
6887  * @param[in] fm
6888  *   Pointer to the flow meter.
6889  *
6890  * @return
6891  *   Pointer to table set on success, NULL otherwise.
6892  */
6893 struct mlx5_meter_domains_infos *
6894 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
6895                           const struct mlx5_flow_meter *fm)
6896 {
6897         const struct mlx5_flow_driver_ops *fops;
6898
6899         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6900         return fops->create_mtr_tbls(dev, fm);
6901 }
6902
6903 /**
6904  * Destroy the meter table set.
6905  *
6906  * @param[in] dev
6907  *   Pointer to Ethernet device.
6908  * @param[in] tbl
6909  *   Pointer to the meter table set.
6910  *
6911  * @return
6912  *   0 on success.
6913  */
6914 int
6915 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
6916                            struct mlx5_meter_domains_infos *tbls)
6917 {
6918         const struct mlx5_flow_driver_ops *fops;
6919
6920         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6921         return fops->destroy_mtr_tbls(dev, tbls);
6922 }
6923
6924 /**
6925  * Create policer rules.
6926  *
6927  * @param[in] dev
6928  *   Pointer to Ethernet device.
6929  * @param[in] fm
6930  *   Pointer to flow meter structure.
6931  * @param[in] attr
6932  *   Pointer to flow attributes.
6933  *
6934  * @return
6935  *   0 on success, -1 otherwise.
6936  */
6937 int
6938 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
6939                                struct mlx5_flow_meter *fm,
6940                                const struct rte_flow_attr *attr)
6941 {
6942         const struct mlx5_flow_driver_ops *fops;
6943
6944         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6945         return fops->create_policer_rules(dev, fm, attr);
6946 }
6947
6948 /**
6949  * Destroy policer rules.
6950  *
6951  * @param[in] fm
6952  *   Pointer to flow meter structure.
6953  * @param[in] attr
6954  *   Pointer to flow attributes.
6955  *
6956  * @return
6957  *   0 on success, -1 otherwise.
6958  */
6959 int
6960 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
6961                                 struct mlx5_flow_meter *fm,
6962                                 const struct rte_flow_attr *attr)
6963 {
6964         const struct mlx5_flow_driver_ops *fops;
6965
6966         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6967         return fops->destroy_policer_rules(dev, fm, attr);
6968 }
6969
6970 /**
6971  * Allocate a counter.
6972  *
6973  * @param[in] dev
6974  *   Pointer to Ethernet device structure.
6975  *
6976  * @return
6977  *   Index to the allocated counter on success, 0 otherwise.
6978  */
6979 uint32_t
6980 mlx5_counter_alloc(struct rte_eth_dev *dev)
6981 {
6982         const struct mlx5_flow_driver_ops *fops;
6983         struct rte_flow_attr attr = { .transfer = 0 };
6984
6985         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
6986                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
6987                 return fops->counter_alloc(dev);
6988         }
6989         DRV_LOG(ERR,
6990                 "port %u counter allocation is not supported.",
6991                  dev->data->port_id);
6992         return 0;
6993 }
6994
6995 /**
6996  * Free a counter.
6997  *
6998  * @param[in] dev
6999  *   Pointer to Ethernet device structure.
7000  * @param[in] cnt
7001  *   Index of the counter to be freed.
7002  */
7003 void
7004 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
7005 {
7006         const struct mlx5_flow_driver_ops *fops;
7007         struct rte_flow_attr attr = { .transfer = 0 };
7008
7009         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7010                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7011                 fops->counter_free(dev, cnt);
7012                 return;
7013         }
7014         DRV_LOG(ERR,
7015                 "port %u counter free is not supported.",
7016                  dev->data->port_id);
7017 }
7018
7019 /**
7020  * Query counter statistics.
7021  *
7022  * @param[in] dev
7023  *   Pointer to Ethernet device structure.
7024  * @param[in] cnt
7025  *   Index to counter to query.
7026  * @param[in] clear
7027  *   Set to clear counter statistics.
7028  * @param[out] pkts
7029  *   Where to store the number of packets that hit the counter.
7030  * @param[out] bytes
7031  *   Where to store the number of bytes that hit the counter.
7032  *
7033  * @return
7034  *   0 on success, a negative errno value otherwise.
7035  */
7036 int
7037 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
7038                    bool clear, uint64_t *pkts, uint64_t *bytes)
7039 {
7040         const struct mlx5_flow_driver_ops *fops;
7041         struct rte_flow_attr attr = { .transfer = 0 };
7042
7043         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7044                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7045                 return fops->counter_query(dev, cnt, clear, pkts, bytes);
7046         }
7047         DRV_LOG(ERR,
7048                 "port %u counter query is not supported.",
7049                  dev->data->port_id);
7050         return -ENOTSUP;
7051 }
7052
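/*
 * Illustrative driver-internal sketch (assuming a DV-capable device):
 * the three counter helpers above compose as allocate -> query -> free.
 *
 *     uint64_t pkts = 0, bytes = 0;
 *     uint32_t cnt = mlx5_counter_alloc(dev);
 *
 *     if (cnt && !mlx5_counter_query(dev, cnt, false, &pkts, &bytes))
 *             DRV_LOG(DEBUG, "counter %u: %" PRIu64 " pkts / %" PRIu64
 *                     " bytes", cnt, pkts, bytes);
 *     if (cnt)
 *             mlx5_counter_free(dev, cnt);
 */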
7053 /**
7054  * Allocate new memory for the counter values, wrapped by all the needed
7055  * management structures.
7056  *
7057  * @param[in] sh
7058  *   Pointer to mlx5_dev_ctx_shared object.
7059  *
7060  * @return
7061  *   0 on success, a negative errno value otherwise.
7062  */
7063 static int
7064 mlx5_flow_create_counter_stat_mem_mng(struct mlx5_dev_ctx_shared *sh)
7065 {
7066         struct mlx5_devx_mkey_attr mkey_attr;
7067         struct mlx5_counter_stats_mem_mng *mem_mng;
7068         volatile struct flow_counter_stats *raw_data;
7069         int raws_n = MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES;
7070         int size = (sizeof(struct flow_counter_stats) *
7071                         MLX5_COUNTERS_PER_POOL +
7072                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
7073                         sizeof(struct mlx5_counter_stats_mem_mng);
7074         size_t pgsize = rte_mem_page_size();
7075         uint8_t *mem;
7076         int i;
7077
7078         if (pgsize == (size_t)-1) {
7079                 DRV_LOG(ERR, "Failed to get mem page size");
7080                 rte_errno = ENOMEM;
7081                 return -ENOMEM;
7082         }
7083         mem = mlx5_malloc(MLX5_MEM_ZERO, size, pgsize, SOCKET_ID_ANY);
7084         if (!mem) {
7085                 rte_errno = ENOMEM;
7086                 return -ENOMEM;
7087         }
7088         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
7089         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
7090         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
7091                                                  IBV_ACCESS_LOCAL_WRITE);
7092         if (!mem_mng->umem) {
7093                 rte_errno = errno;
7094                 mlx5_free(mem);
7095                 return -rte_errno;
7096         }
7097         mkey_attr.addr = (uintptr_t)mem;
7098         mkey_attr.size = size;
7099         mkey_attr.umem_id = mlx5_os_get_umem_id(mem_mng->umem);
7100         mkey_attr.pd = sh->pdn;
7101         mkey_attr.log_entity_size = 0;
7102         mkey_attr.pg_access = 0;
7103         mkey_attr.klm_array = NULL;
7104         mkey_attr.klm_num = 0;
7105         mkey_attr.relaxed_ordering = sh->cmng.relaxed_ordering;
7106         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
7107         if (!mem_mng->dm) {
7108                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
7109                 rte_errno = errno;
7110                 mlx5_free(mem);
7111                 return -rte_errno;
7112         }
7113         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
7114         raw_data = (volatile struct flow_counter_stats *)mem;
7115         for (i = 0; i < raws_n; ++i) {
7116                 mem_mng->raws[i].mem_mng = mem_mng;
7117                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
7118         }
7119         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
7120                 LIST_INSERT_HEAD(&sh->cmng.free_stat_raws,
7121                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE + i,
7122                                  next);
7123         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
7124         sh->cmng.mem_mng = mem_mng;
7125         return 0;
7126 }
7127
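/*
 * Layout of the single allocation made above, for reference:
 *
 *     mem -> | raw counter values: raws_n * MLX5_COUNTERS_PER_POOL |
 *            | raws[]: raws_n struct mlx5_counter_stats_raw        |
 *            | struct mlx5_counter_stats_mem_mng (at the very end) |
 *
 * Each raws[i].data points at its slice of the raw value area; the last
 * MLX5_MAX_PENDING_QUERIES raws are parked on the free_stat_raws list
 * for asynchronous queries.
 */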
7128 /**
7129  * Set the statistic memory to the new counter pool.
7130  *
7131  * @param[in] sh
7132  *   Pointer to mlx5_dev_ctx_shared object.
7133  * @param[in] pool
7134  *   Pointer to the pool to set the statistic memory.
7135  *
7136  * @return
7137  *   0 on success, a negative errno value otherwise.
7138  */
7139 static int
7140 mlx5_flow_set_counter_stat_mem(struct mlx5_dev_ctx_shared *sh,
7141                                struct mlx5_flow_counter_pool *pool)
7142 {
7143         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7144         /* Resize the statistic memory once it is used up. */
7145         if (!(pool->index % MLX5_CNT_CONTAINER_RESIZE) &&
7146             mlx5_flow_create_counter_stat_mem_mng(sh)) {
7147                 DRV_LOG(ERR, "Cannot resize counter stat mem.");
7148                 return -1;
7149         }
7150         rte_spinlock_lock(&pool->sl);
7151         pool->raw = cmng->mem_mng->raws + pool->index %
7152                     MLX5_CNT_CONTAINER_RESIZE;
7153         rte_spinlock_unlock(&pool->sl);
7154         pool->raw_hw = NULL;
7155         return 0;
7156 }
7157
7158 #define MLX5_POOL_QUERY_FREQ_US 1000000
7159
7160 /**
7161  * Set the periodic procedure for triggering asynchronous batch queries for all
7162  * the counter pools.
7163  *
7164  * @param[in] sh
7165  *   Pointer to mlx5_dev_ctx_shared object.
7166  */
7167 void
7168 mlx5_set_query_alarm(struct mlx5_dev_ctx_shared *sh)
7169 {
7170         uint32_t pools_n, us;
7171
7172         pools_n = __atomic_load_n(&sh->cmng.n_valid, __ATOMIC_RELAXED);
7173         us = MLX5_POOL_QUERY_FREQ_US / pools_n;
7174         DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
7175         if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
7176                 sh->cmng.query_thread_on = 0;
7177                 DRV_LOG(ERR, "Cannot reinitialize query alarm");
7178         } else {
7179                 sh->cmng.query_thread_on = 1;
7180         }
7181 }
7182
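/*
 * Worked example of the pacing above (assuming at least one valid pool):
 * with MLX5_POOL_QUERY_FREQ_US at 1000000 and 4 valid pools, the alarm
 * fires every 250000 us and advances one pool per firing, so every pool
 * is queried roughly once per second regardless of the pool count.
 */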
7183 /**
7184  * The periodic procedure for triggering asynchronous batch queries for all the
7185  * counter pools. This function is invoked from the host thread.
7186  *
7187  * @param[in] arg
7188  *   The parameter for the alarm process.
7189  */
7190 void
7191 mlx5_flow_query_alarm(void *arg)
7192 {
7193         struct mlx5_dev_ctx_shared *sh = arg;
7194         int ret;
7195         uint16_t pool_index = sh->cmng.pool_index;
7196         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7197         struct mlx5_flow_counter_pool *pool;
7198         uint16_t n_valid;
7199
7200         if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
7201                 goto set_alarm;
7202         rte_spinlock_lock(&cmng->pool_update_sl);
7203         pool = cmng->pools[pool_index];
7204         n_valid = cmng->n_valid;
7205         rte_spinlock_unlock(&cmng->pool_update_sl);
7206         /* Set the statistic memory for the newly created pool. */
7207         if (!pool->raw && mlx5_flow_set_counter_stat_mem(sh, pool))
7208                 goto set_alarm;
7209         if (pool->raw_hw)
7210                 /* There is a pool query in progress. */
7211                 goto set_alarm;
7212         pool->raw_hw = LIST_FIRST(&sh->cmng.free_stat_raws);
7214         if (!pool->raw_hw)
7215                 /* No free counter statistics raw memory. */
7216                 goto set_alarm;
7217         /*
7218          * Identify the counters released between query trigger and query
7219          * handle more efficiently. A counter released in this gap period
7220          * should wait for a new round of query, as the newly arrived
7221          * packets will not be taken into account.
7222          */
7223         pool->query_gen++;
7224         ret = mlx5_devx_cmd_flow_counter_query(pool->min_dcs, 0,
7225                                                MLX5_COUNTERS_PER_POOL,
7226                                                NULL, NULL,
7227                                                pool->raw_hw->mem_mng->dm->id,
7228                                                (void *)(uintptr_t)
7229                                                pool->raw_hw->data,
7230                                                sh->devx_comp,
7231                                                (uint64_t)(uintptr_t)pool);
7232         if (ret) {
7233                 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
7234                         " %d", pool->min_dcs->id);
7235                 pool->raw_hw = NULL;
7236                 goto set_alarm;
7237         }
7238         LIST_REMOVE(pool->raw_hw, next);
7239         sh->cmng.pending_queries++;
7240         pool_index++;
7241         if (pool_index >= n_valid)
7242                 pool_index = 0;
7243 set_alarm:
7244         sh->cmng.pool_index = pool_index;
7245         mlx5_set_query_alarm(sh);
7246 }
7247
7248 /**
7249  * Check for newly aged flows in the counter pool and raise the aging event.
7250  *
7251  * @param[in] sh
7252  *   Pointer to mlx5_dev_ctx_shared object.
7253  * @param[in] pool
7254  *   Pointer to the current counter pool.
7255  */
7256 static void
7257 mlx5_flow_aging_check(struct mlx5_dev_ctx_shared *sh,
7258                    struct mlx5_flow_counter_pool *pool)
7259 {
7260         struct mlx5_priv *priv;
7261         struct mlx5_flow_counter *cnt;
7262         struct mlx5_age_info *age_info;
7263         struct mlx5_age_param *age_param;
7264         struct mlx5_counter_stats_raw *cur = pool->raw_hw;
7265         struct mlx5_counter_stats_raw *prev = pool->raw;
7266         const uint64_t curr_time = MLX5_CURR_TIME_SEC;
7267         const uint32_t time_delta = curr_time - pool->time_of_last_age_check;
7268         uint16_t expected = AGE_CANDIDATE;
7269         uint32_t i;
7270
7271         pool->time_of_last_age_check = curr_time;
7272         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
7273                 cnt = MLX5_POOL_GET_CNT(pool, i);
7274                 age_param = MLX5_CNT_TO_AGE(cnt);
7275                 if (__atomic_load_n(&age_param->state,
7276                                     __ATOMIC_RELAXED) != AGE_CANDIDATE)
7277                         continue;
7278                 if (cur->data[i].hits != prev->data[i].hits) {
7279                         __atomic_store_n(&age_param->sec_since_last_hit, 0,
7280                                          __ATOMIC_RELAXED);
7281                         continue;
7282                 }
7283                 if (__atomic_add_fetch(&age_param->sec_since_last_hit,
7284                                        time_delta,
7285                                        __ATOMIC_RELAXED) <= age_param->timeout)
7286                         continue;
7287                 /*
7288                  * Hold the lock first; otherwise, if the release
7289                  * happens between setting the state to AGE_TMOUT
7290                  * and the tailq operation, the release procedure
7291                  * may delete a non-existent tailq node.
7292                  */
7293                 priv = rte_eth_devices[age_param->port_id].data->dev_private;
7294                 age_info = GET_PORT_AGE_INFO(priv);
7295                 rte_spinlock_lock(&age_info->aged_sl);
7296                 if (__atomic_compare_exchange_n(&age_param->state, &expected,
7297                                                 AGE_TMOUT, false,
7298                                                 __ATOMIC_RELAXED,
7299                                                 __ATOMIC_RELAXED)) {
7300                         TAILQ_INSERT_TAIL(&age_info->aged_counters, cnt, next);
7301                         MLX5_AGE_SET(age_info, MLX5_AGE_EVENT_NEW);
7302                 }
7303                 rte_spinlock_unlock(&age_info->aged_sl);
7304         }
7305         for (i = 0; i < sh->max_port; i++) {
7306                 age_info = &sh->port[i].age_info;
7307                 if (!MLX5_AGE_GET(age_info, MLX5_AGE_EVENT_NEW))
7308                         continue;
7309                 if (MLX5_AGE_GET(age_info, MLX5_AGE_TRIGGER))
7310                         rte_eth_dev_callback_process
7311                                 (&rte_eth_devices[sh->port[i].devx_ih_port_id],
7312                                 RTE_ETH_EVENT_FLOW_AGED, NULL);
7313                 age_info->flags = 0;
7314         }
7315 }
7316
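/*
 * Aging arithmetic above, summarized: while a counter's hit count is
 * unchanged between two passes, sec_since_last_hit accumulates
 * time_delta; a single hit resets it to zero. Only once the accumulated
 * idle time exceeds age_param->timeout is the counter moved from
 * AGE_CANDIDATE to AGE_TMOUT and queued on aged_counters for the
 * application to collect.
 */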
7317 /**
7318  * Handler for the HW response delivering ready values from an asynchronous
7319  * batch query. This function is invoked from the host thread.
7320  *
7321  * @param[in] sh
7322  *   The pointer to the shared device context.
7323  * @param[in] async_id
7324  *   The Devx async ID.
7325  * @param[in] status
7326  *   The status of the completion.
7327  */
7328 void
7329 mlx5_flow_async_pool_query_handle(struct mlx5_dev_ctx_shared *sh,
7330                                   uint64_t async_id, int status)
7331 {
7332         struct mlx5_flow_counter_pool *pool =
7333                 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
7334         struct mlx5_counter_stats_raw *raw_to_free;
7335         uint8_t query_gen = pool->query_gen ^ 1;
7336         struct mlx5_flow_counter_mng *cmng = &sh->cmng;
7337         enum mlx5_counter_type cnt_type =
7338                 pool->is_aged ? MLX5_COUNTER_TYPE_AGE :
7339                                 MLX5_COUNTER_TYPE_ORIGIN;
7340
7341         if (unlikely(status)) {
7342                 raw_to_free = pool->raw_hw;
7343         } else {
7344                 raw_to_free = pool->raw;
7345                 if (pool->is_aged)
7346                         mlx5_flow_aging_check(sh, pool);
7347                 rte_spinlock_lock(&pool->sl);
7348                 pool->raw = pool->raw_hw;
7349                 rte_spinlock_unlock(&pool->sl);
7350                 /* Be sure the new raw counter data is updated in memory. */
7351                 rte_io_wmb();
7352                 if (!TAILQ_EMPTY(&pool->counters[query_gen])) {
7353                         rte_spinlock_lock(&cmng->csl[cnt_type]);
7354                         TAILQ_CONCAT(&cmng->counters[cnt_type],
7355                                      &pool->counters[query_gen], next);
7356                         rte_spinlock_unlock(&cmng->csl[cnt_type]);
7357                 }
7358         }
7359         LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
7360         pool->raw_hw = NULL;
7361         sh->cmng.pending_queries--;
7362 }
7363
7364 static const struct mlx5_flow_tbl_data_entry  *
7365 tunnel_mark_decode(struct rte_eth_dev *dev, uint32_t mark)
7366 {
7367         struct mlx5_priv *priv = dev->data->dev_private;
7368         struct mlx5_dev_ctx_shared *sh = priv->sh;
7369         struct mlx5_hlist_entry *he;
7370         union tunnel_offload_mark mbits = { .val = mark };
7371         union mlx5_flow_tbl_key table_key = {
7372                 {
7373                         .table_id = tunnel_id_to_flow_tbl(mbits.table_id),
7374                         .dummy = 0,
7375                         .domain = !!mbits.transfer,
7376                         .direction = 0,
7377                 }
7378         };
7379         he = mlx5_hlist_lookup(sh->flow_tbls, table_key.v64, NULL);
7380         return he ?
7381                container_of(he, struct mlx5_flow_tbl_data_entry, entry) : NULL;
7382 }
7383
7384 static uint32_t
7385 tunnel_flow_group_to_flow_table(struct rte_eth_dev *dev,
7386                                 const struct mlx5_flow_tunnel *tunnel,
7387                                 uint32_t group, uint32_t *table,
7388                                 struct rte_flow_error *error)
7389 {
7390         struct mlx5_priv *priv = dev->data->dev_private;
7391         struct mlx5_hlist_entry *he;
7392         struct tunnel_tbl_entry *tte;
7393         union tunnel_tbl_key key = {
7394                 .tunnel_id = tunnel ? tunnel->tunnel_id : 0,
7395                 .group = group
7396         };
7397         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7398         struct mlx5_hlist *group_hash;
7399
7400         group_hash = tunnel ? tunnel->groups : thub->groups;
7401         he = mlx5_hlist_lookup(group_hash, key.val, NULL);
7402         if (!he) {
7403                 tte = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO,
7404                                   sizeof(*tte), 0,
7405                                   SOCKET_ID_ANY);
7406                 if (!tte)
7407                         goto err;
7408                 tte->hash.key = key.val;
7409                 mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7410                                   &tte->flow_table);
7411                 if (tte->flow_table >= MLX5_MAX_TABLES) {
7412                         DRV_LOG(ERR, "Tunnel TBL ID %d exceeds the max limit.",
7413                                 tte->flow_table);
7414                         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TNL_TBL_ID],
7415                                         tte->flow_table);
7416                         goto err;
7417                 } else if (!tte->flow_table) {
7418                         goto err;
7419                 }
7420                 tte->flow_table = tunnel_id_to_flow_tbl(tte->flow_table);
7421                 mlx5_hlist_insert(group_hash, &tte->hash);
7422         } else {
7423                 tte = container_of(he, typeof(*tte), hash);
7424         }
7425         *table = tte->flow_table;
7426         DRV_LOG(DEBUG, "port %u tunnel %u group=%#x table=%#x",
7427                 dev->data->port_id, key.tunnel_id, group, *table);
7428         return 0;
7429
7430 err:
7431         if (tte)
7432                 mlx5_free(tte);
7433         return rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7434                                   NULL, "tunnel group index not supported");
7435 }
7436
7437 static int
7438 flow_group_to_table(uint32_t port_id, uint32_t group, uint32_t *table,
7439                     struct flow_grp_info grp_info, struct rte_flow_error *error)
7440 {
7441         if (grp_info.transfer && grp_info.external && grp_info.fdb_def_rule) {
7442                 if (group == UINT32_MAX)
7443                         return rte_flow_error_set
7444                                                 (error, EINVAL,
7445                                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
7446                                                  NULL,
7447                                                  "group index not supported");
7448                 *table = group + 1;
7449         } else {
7450                 *table = group;
7451         }
7452         DRV_LOG(DEBUG, "port %u group=%#x table=%#x", port_id, group, *table);
7453         return 0;
7454 }
7455
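/*
 * Worked example for the standard translation above: with transfer,
 * external and fdb_def_rule all set, group 0 stays reserved for the FDB
 * default rule, so group N maps to table N + 1 (e.g. group 5 -> table 6).
 * In every other combination the group index is used as the table id
 * unchanged. Note that the caller below may additionally scale external
 * group ids by MLX5_FLOW_TABLE_FACTOR before this mapping is applied.
 */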
7456 /**
7457  * Translate the rte_flow group index to HW table value.
7458  *
7459  * If tunnel offload is disabled, all group ids are converted to flow
7460  * table ids using the standard method.
7461  * If tunnel offload is enabled, group id can be converted using the
7462  * standard or tunnel conversion method. Group conversion method
7463  * selection depends on flags in `grp_info` parameter:
7464  * - Internal (grp_info.external == 0) groups conversion uses the
7465  *   standard method.
7466  * - Group ids in JUMP action converted with the tunnel conversion.
7467  * - Group id in rule attribute conversion depends on a rule type and
7468  *   group id value:
7469  *   ** non zero group attributes converted with the tunnel method
7470  *   ** zero group attribute in non-tunnel rule is converted using the
7471  *      standard method - there's only one root table
7472  *   ** zero group attribute in steer tunnel rule is converted with the
7473  *      standard method - single root table
7474  *   ** zero group attribute in match tunnel rule is a special OvS
7475  *      case: that value is used for portability reasons. That group
7476  *      id is converted with the tunnel conversion method.
7477  *
7478  * @param[in] dev
7479  *   Port device
7480  * @param[in] tunnel
7481  *   PMD tunnel offload object
7482  * @param[in] group
7483  *   rte_flow group index value.
7484  * @param[out] table
7485  *   HW table value.
7486  * @param[in] grp_info
7487  *   flags used for conversion
7488  * @param[out] error
7489  *   Pointer to error structure.
7490  *
7491  * @return
7492  *   0 on success, a negative errno value otherwise and rte_errno is set.
7493  */
7494 int
7495 mlx5_flow_group_to_table(struct rte_eth_dev *dev,
7496                          const struct mlx5_flow_tunnel *tunnel,
7497                          uint32_t group, uint32_t *table,
7498                          struct flow_grp_info grp_info,
7499                          struct rte_flow_error *error)
7500 {
7501         int ret;
7502         bool standard_translation;
7503
7504         if (grp_info.external && group < MLX5_MAX_TABLES_EXTERNAL)
7505                 group *= MLX5_FLOW_TABLE_FACTOR;
7506         if (is_tunnel_offload_active(dev)) {
7507                 standard_translation = !grp_info.external ||
7508                                         grp_info.std_tbl_fix;
7509         } else {
7510                 standard_translation = true;
7511         }
7512         DRV_LOG(DEBUG,
7513                 "port %u group=%#x transfer=%d external=%d fdb_def_rule=%d translate=%s",
7514                 dev->data->port_id, group, grp_info.transfer,
7515                 grp_info.external, grp_info.fdb_def_rule,
7516                 standard_translation ? "STANDARD" : "TUNNEL");
7517         if (standard_translation)
7518                 ret = flow_group_to_table(dev->data->port_id, group, table,
7519                                           grp_info, error);
7520         else
7521                 ret = tunnel_flow_group_to_flow_table(dev, tunnel, group,
7522                                                       table, error);
7523
7524         return ret;
7525 }
7526
7527 /**
7528  * Discover availability of metadata reg_c's.
7529  *
7530  * Iteratively use test flows to check availability.
7531  *
7532  * @param[in] dev
7533  *   Pointer to the Ethernet device structure.
7534  *
7535  * @return
7536  *   0 on success, a negative errno value otherwise and rte_errno is set.
7537  */
7538 int
7539 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
7540 {
7541         struct mlx5_priv *priv = dev->data->dev_private;
7542         struct mlx5_dev_config *config = &priv->config;
7543         enum modify_reg idx;
7544         int n = 0;
7545
7546         /* reg_c[0] and reg_c[1] are reserved. */
7547         config->flow_mreg_c[n++] = REG_C_0;
7548         config->flow_mreg_c[n++] = REG_C_1;
7549         /* Discover availability of other reg_c's. */
7550         for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
7551                 struct rte_flow_attr attr = {
7552                         .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
7553                         .priority = MLX5_FLOW_PRIO_RSVD,
7554                         .ingress = 1,
7555                 };
7556                 struct rte_flow_item items[] = {
7557                         [0] = {
7558                                 .type = RTE_FLOW_ITEM_TYPE_END,
7559                         },
7560                 };
7561                 struct rte_flow_action actions[] = {
7562                         [0] = {
7563                                 .type = (enum rte_flow_action_type)
7564                                         MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
7565                                 .conf = &(struct mlx5_flow_action_copy_mreg){
7566                                         .src = REG_C_1,
7567                                         .dst = idx,
7568                                 },
7569                         },
7570                         [1] = {
7571                                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
7572                                 .conf = &(struct rte_flow_action_jump){
7573                                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
7574                                 },
7575                         },
7576                         [2] = {
7577                                 .type = RTE_FLOW_ACTION_TYPE_END,
7578                         },
7579                 };
7580                 uint32_t flow_idx;
7581                 struct rte_flow *flow;
7582                 struct rte_flow_error error;
7583
7584                 if (!config->dv_flow_en)
7585                         break;
7586                 /* Create internal flow, validation skips copy action. */
7587                 flow_idx = flow_list_create(dev, NULL, &attr, items,
7588                                             actions, false, &error);
7589                 flow = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_RTE_FLOW],
7590                                       flow_idx);
7591                 if (!flow)
7592                         continue;
7593                 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
7594                         config->flow_mreg_c[n++] = idx;
7595                 flow_list_destroy(dev, NULL, flow_idx);
7596         }
7597         for (; n < MLX5_MREG_C_NUM; ++n)
7598                 config->flow_mreg_c[n] = REG_NON;
7599         return 0;
7600 }
7601
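/*
 * Illustrative outcome (depends on firmware configuration): on a device
 * where only reg_c[2] and reg_c[3] pass the test flows above, the result
 * is config->flow_mreg_c = { REG_C_0, REG_C_1, REG_C_2, REG_C_3,
 * REG_NON, ... }, with all remaining slots set to REG_NON.
 */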
7602 /**
7603  * Dump flow raw HW data to a file.
7604  *
7605  * @param[in] dev
7606  *    The pointer to Ethernet device.
7607  * @param[in] file
7608  *   A pointer to a file for output.
7609  * @param[out] error
7610  *   Perform verbose error reporting if not NULL. PMDs initialize this
7611  *   structure in case of error only.
7612  * @return
7613  *   0 on success, a negative value otherwise.
7614  */
7615 int
7616 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
7617                    FILE *file,
7618                    struct rte_flow_error *error __rte_unused)
7619 {
7620         struct mlx5_priv *priv = dev->data->dev_private;
7621         struct mlx5_dev_ctx_shared *sh = priv->sh;
7622
7623         if (!priv->config.dv_flow_en) {
7624                 if (fputs("device dv flow disabled\n", file) <= 0)
7625                         return -errno;
7626                 return -ENOTSUP;
7627         }
7628         return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
7629                                        sh->tx_domain, file);
7630 }
7631
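/*
 * Illustrative usage sketch (not part of this file, 20.11-era signature):
 * triggering the dump above through the generic API.
 *
 *     FILE *f = fopen("/tmp/mlx5_flow_dump.txt", "w");
 *     struct rte_flow_error error;
 *
 *     if (f != NULL) {
 *             rte_flow_dev_dump(port_id, f, &error);
 *             fclose(f);
 *     }
 */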
7632 /**
7633  * Get aged-out flows.
7634  *
7635  * @param[in] dev
7636  *   Pointer to the Ethernet device structure.
7637  * @param[in] contexts
7638  *   The address of an array of pointers to the aged-out flow contexts.
7639  * @param[in] nb_contexts
7640  *   The length of context array pointers.
7641  * @param[out] error
7642  *   Perform verbose error reporting if not NULL. Initialized in case of
7643  *   error only.
7644  *
7645  * @return
7646  *   The number of contexts retrieved on success, a negative errno value
7647  *   otherwise. If nb_contexts is 0, the total number of aged contexts is
7648  *   returned; otherwise, the number of aged flows reported in the context
7649  *   array is returned.
7650  */
7651 int
7652 mlx5_flow_get_aged_flows(struct rte_eth_dev *dev, void **contexts,
7653                         uint32_t nb_contexts, struct rte_flow_error *error)
7654 {
7655         const struct mlx5_flow_driver_ops *fops;
7656         struct rte_flow_attr attr = { .transfer = 0 };
7657
7658         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
7659                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
7660                 return fops->get_aged_flows(dev, contexts, nb_contexts,
7661                                                     error);
7662         }
7663         DRV_LOG(ERR,
7664                 "port %u getting aged flows is not supported.",
7665                  dev->data->port_id);
7666         return -ENOTSUP;
7667 }
7668
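/*
 * Illustrative usage sketch (not part of this file): the usual two-step
 * pattern for the API above, typically run from an
 * RTE_ETH_EVENT_FLOW_AGED callback.
 *
 *     struct rte_flow_error error;
 *     int n = rte_flow_get_aged_flows(port_id, NULL, 0, &error);
 *
 *     if (n > 0) {
 *             void **ctx = calloc(n, sizeof(*ctx));
 *
 *             if (ctx != NULL) {
 *                     n = rte_flow_get_aged_flows(port_id, ctx, n, &error);
 *                     // Each ctx[i] is the AGE action's user context.
 *                     free(ctx);
 *             }
 *     }
 */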
7669 /* Wrapper for driver action_validate op callback */
7670 static int
7671 flow_drv_action_validate(struct rte_eth_dev *dev,
7672                          const struct rte_flow_shared_action_conf *conf,
7673                          const struct rte_flow_action *action,
7674                          const struct mlx5_flow_driver_ops *fops,
7675                          struct rte_flow_error *error)
7676 {
7677         static const char err_msg[] = "shared action validation unsupported";
7678
7679         if (!fops->action_validate) {
7680                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7681                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7682                                    NULL, err_msg);
7683                 return -rte_errno;
7684         }
7685         return fops->action_validate(dev, conf, action, error);
7686 }
7687
7688 /**
7689  * Destroys the shared action by handle.
7690  *
7691  * @param dev
7692  *   Pointer to Ethernet device structure.
7693  * @param[in] action
7694  *   Handle for the shared action to be destroyed.
7695  * @param[out] error
7696  *   Perform verbose error reporting if not NULL. PMDs initialize this
7697  *   structure in case of error only.
7698  *
7699  * @return
7700  *   0 on success, a negative errno value otherwise and rte_errno is set.
7701  *
7702  * @note: wrapper for driver action_destroy op callback.
7703  */
7704 static int
7705 mlx5_shared_action_destroy(struct rte_eth_dev *dev,
7706                            struct rte_flow_shared_action *action,
7707                            struct rte_flow_error *error)
7708 {
7709         static const char err_msg[] = "shared action destruction unsupported";
7710         struct rte_flow_attr attr = { .transfer = 0 };
7711         const struct mlx5_flow_driver_ops *fops =
7712                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7713
7714         if (!fops->action_destroy) {
7715                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7716                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7717                                    NULL, err_msg);
7718                 return -rte_errno;
7719         }
7720         return fops->action_destroy(dev, action, error);
7721 }
7722
7723 /* Wrapper for driver action_update op callback */
7724 static int
7725 flow_drv_action_update(struct rte_eth_dev *dev,
7726                        struct rte_flow_shared_action *action,
7727                        const void *action_conf,
7728                        const struct mlx5_flow_driver_ops *fops,
7729                        struct rte_flow_error *error)
7730 {
7731         static const char err_msg[] = "shared action update unsupported";
7732
7733         if (!fops->action_update) {
7734                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7735                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7736                                    NULL, err_msg);
7737                 return -rte_errno;
7738         }
7739         return fops->action_update(dev, action, action_conf, error);
7740 }
7741
7742 /**
7743  * Create shared action for reuse in multiple flow rules.
7744  *
7745  * @param dev
7746  *   Pointer to Ethernet device structure.
7747  * @param[in] action
7748  *   Action configuration for shared action creation.
7749  * @param[out] error
7750  *   Perform verbose error reporting if not NULL. PMDs initialize this
7751  *   structure in case of error only.
7752  * @return
7753  *   A valid handle in case of success, NULL otherwise and rte_errno is set.
7754  */
7755 static struct rte_flow_shared_action *
7756 mlx5_shared_action_create(struct rte_eth_dev *dev,
7757                           const struct rte_flow_shared_action_conf *conf,
7758                           const struct rte_flow_action *action,
7759                           struct rte_flow_error *error)
7760 {
7761         static const char err_msg[] = "shared action creation unsupported";
7762         struct rte_flow_attr attr = { .transfer = 0 };
7763         const struct mlx5_flow_driver_ops *fops =
7764                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7765
7766         if (flow_drv_action_validate(dev, conf, action, fops, error))
7767                 return NULL;
7768         if (!fops->action_create) {
7769                 DRV_LOG(ERR, "port %u %s.", dev->data->port_id, err_msg);
7770                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
7771                                    NULL, err_msg);
7772                 return NULL;
7773         }
7774         return fops->action_create(dev, conf, action, error);
7775 }
7776
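/*
 * Illustrative usage sketch, not part of the driver: an application reaches
 * the wrapper above through the generic rte_flow API. The port_id, queue
 * list and RSS type below are assumptions made for the example only.
 *
 *   uint16_t queues[] = { 0, 1 };
 *   struct rte_flow_action_rss rss = {
 *           .types = ETH_RSS_IP,
 *           .queue_num = RTE_DIM(queues),
 *           .queue = queues,
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss,
 *   };
 *   struct rte_flow_shared_action_conf conf = { .ingress = 1 };
 *   struct rte_flow_error error;
 *   struct rte_flow_shared_action *handle =
 *           rte_flow_shared_action_create(port_id, &conf, &action, &error);
 *
 * The returned handle is attached to flow rules with an action of type
 * RTE_FLOW_ACTION_TYPE_SHARED and released with
 * rte_flow_shared_action_destroy(port_id, handle, &error).
 */
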
7777 /**
7778  * Updates in place the shared action configuration pointed to by the
7779  * *shared_action* handle with the configuration provided as the *action*
7780  * argument. The update affects all flow rules reusing the action via its
7781  * handle.
7782  *
7783  * @param dev
7784  *   Pointer to Ethernet device structure.
7785  * @param[in] shared_action
7786  *   Handle for the shared action to be updated.
7787  * @param[in] action
7788  *   Action specification used to modify the action pointed to by the handle.
7789  *   *action* must be of the same type as the action referenced by the
7790  *   *shared_action* handle argument; otherwise it is considered invalid.
7791  * @param[out] error
7792  *   Perform verbose error reporting if not NULL. PMDs initialize this
7793  *   structure in case of error only.
7794  *
7795  * @return
7796  *   0 on success, a negative errno value otherwise and rte_errno is set.
7797  */
7798 static int
7799 mlx5_shared_action_update(struct rte_eth_dev *dev,
7800                 struct rte_flow_shared_action *shared_action,
7801                 const struct rte_flow_action *action,
7802                 struct rte_flow_error *error)
7803 {
7804         struct rte_flow_attr attr = { .transfer = 0 };
7805         const struct mlx5_flow_driver_ops *fops =
7806                         flow_get_drv_ops(flow_get_drv_type(dev, &attr));
7807         int ret;
7808
7809         switch (shared_action->type) {
7810         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
7811                 if (action->type != RTE_FLOW_ACTION_TYPE_RSS) {
7812                         return rte_flow_error_set(error, EINVAL,
7813                                                   RTE_FLOW_ERROR_TYPE_ACTION,
7814                                                   NULL,
7815                                                   "update action type invalid");
7816                 }
7817                 ret = flow_drv_action_validate(dev, NULL, action, fops, error);
7818                 if (ret)
7819                         return ret;
7820                 return flow_drv_action_update(dev, shared_action, action->conf,
7821                                               fops, error);
7822         default:
7823                 return rte_flow_error_set(error, ENOTSUP,
7824                                           RTE_FLOW_ERROR_TYPE_ACTION,
7825                                           NULL,
7826                                           "action type not supported");
7827         }
7828 }
7829
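/*
 * Illustrative sketch, assuming "handle" is a shared RSS action created as
 * shown above: a single update call redirects every flow rule referencing
 * the handle to the new queue set without recreating the rules.
 *
 *   uint16_t new_queues[] = { 2, 3 };
 *   struct rte_flow_action_rss rss = {
 *           .types = ETH_RSS_IP,
 *           .queue_num = RTE_DIM(new_queues),
 *           .queue = new_queues,
 *   };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_RSS,
 *           .conf = &rss,
 *   };
 *   int ret = rte_flow_shared_action_update(port_id, handle, &action,
 *                                           &error);
 */
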
7830 /**
7831  * Query the shared action by handle.
7832  *
7833  * This function allows retrieving action-specific data such as counters.
7834  * Data is gathered from the shared action, which may be referenced in
7835  * more than one flow rule definition.
7836  *
7837  * @see RTE_FLOW_ACTION_TYPE_COUNT
7838  *
7839  * @param dev
7840  *   Pointer to Ethernet device structure.
7841  * @param[in] action
7842  *   Handle for the shared action to query.
7843  * @param[in, out] data
7844  *   Pointer to storage for the associated query data type.
7845  * @param[out] error
7846  *   Perform verbose error reporting if not NULL. PMDs initialize this
7847  *   structure in case of error only.
7848  *
7849  * @return
7850  *   0 on success, a negative errno value otherwise and rte_errno is set.
7851  */
7852 static int
7853 mlx5_shared_action_query(struct rte_eth_dev *dev,
7854                          const struct rte_flow_shared_action *action,
7855                          void *data,
7856                          struct rte_flow_error *error)
7857 {
7858         (void)dev;
7859         switch (action->type) {
7860         case MLX5_RTE_FLOW_ACTION_TYPE_SHARED_RSS:
7861                 __atomic_load(&action->refcnt, (uint32_t *)data,
7862                               __ATOMIC_RELAXED);
7863                 return 0;
7864         default:
7865                 return rte_flow_error_set(error, ENOTSUP,
7866                                           RTE_FLOW_ERROR_TYPE_ACTION,
7867                                           NULL,
7868                                           "action type not supported");
7869         }
7870 }
7871
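/*
 * Illustrative sketch: for a shared RSS action the wrapper above stores the
 * current reference count into the caller's buffer. "port_id" and "handle"
 * are assumptions for the example.
 *
 *   uint32_t refcnt = 0;
 *   struct rte_flow_error error;
 *
 *   if (rte_flow_shared_action_query(port_id, handle, &refcnt, &error) == 0)
 *           printf("shared RSS action has %u reference(s)\n", refcnt);
 */
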
7872 /**
7873  * Destroy all shared actions.
7874  *
7875  * @param dev
7876  *   Pointer to Ethernet device.
7877  *
7878  * @return
7879  *   0 on success, a negative errno value otherwise and rte_errno is set.
7880  */
7881 int
7882 mlx5_shared_action_flush(struct rte_eth_dev *dev)
7883 {
7884         struct rte_flow_error error;
7885         struct mlx5_priv *priv = dev->data->dev_private;
7886         struct rte_flow_shared_action *action;
7887         int ret = 0;
7888
7889         while (!LIST_EMPTY(&priv->shared_actions)) {
7890                 action = LIST_FIRST(&priv->shared_actions);
7891                 ret = mlx5_shared_action_destroy(dev, action, &error);
                     /* Stop on failure; a failed destroy leaves the action listed. */
                     if (ret)
                             break;
7892         }
7893         return ret;
7894 }
7895
7896 static void
7897 mlx5_flow_tunnel_free(struct rte_eth_dev *dev,
7898                       struct mlx5_flow_tunnel *tunnel)
7899 {
7900         struct mlx5_priv *priv = dev->data->dev_private;
7901
7902         DRV_LOG(DEBUG, "port %u release pmd tunnel id=0x%x",
7903                 dev->data->port_id, tunnel->tunnel_id);
7904         RTE_VERIFY(!__atomic_load_n(&tunnel->refctn, __ATOMIC_RELAXED));
7905         LIST_REMOVE(tunnel, chain);
7906         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
7907                         tunnel->tunnel_id);
7908         mlx5_hlist_destroy(tunnel->groups);
7909         mlx5_free(tunnel);
7910 }
7911
7912 static struct mlx5_flow_tunnel *
7913 mlx5_find_tunnel_id(struct rte_eth_dev *dev, uint32_t id)
7914 {
7915         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7916         struct mlx5_flow_tunnel *tun;
7917
7918         LIST_FOREACH(tun, &thub->tunnels, chain) {
7919                 if (tun->tunnel_id == id)
7920                         break;
7921         }
7922
7923         return tun;
7924 }
7925
7926 static struct mlx5_flow_tunnel *
7927 mlx5_flow_tunnel_allocate(struct rte_eth_dev *dev,
7928                           const struct rte_flow_tunnel *app_tunnel)
7929 {
7930         struct mlx5_priv *priv = dev->data->dev_private;
7931         struct mlx5_flow_tunnel *tunnel;
7932         uint32_t id;
7933
7934         mlx5_ipool_malloc(priv->sh->ipool[MLX5_IPOOL_TUNNEL_ID],
7935                           &id);
7936         if (id >= MLX5_MAX_TUNNELS) {
7937                 mlx5_ipool_free(priv->sh->ipool
7938                                 [MLX5_IPOOL_TUNNEL_ID], id);
7939                 DRV_LOG(ERR, "Tunnel ID %u exceeds max limit.", id);
7940                 return NULL;
7941         } else if (!id) {
7942                 return NULL;
7943         }
7944         /*
7945          * mlx5 flow tunnel is an auxiliary data structure.
7946          * It is not part of the I/O path, so there is no need to
7947          * allocate it from the huge-page pools dedicated to I/O.
7948          */
7949         tunnel = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*tunnel),
7950                              0, SOCKET_ID_ANY);
7951         if (!tunnel) {
7952                 mlx5_ipool_free(priv->sh->ipool
7953                                 [MLX5_IPOOL_TUNNEL_ID], id);
7954                 return NULL;
7955         }
7956         tunnel->groups = mlx5_hlist_create("tunnel groups", 1024, 0, 0,
7957                                            NULL, NULL, NULL);
7958         if (!tunnel->groups) {
7959                 mlx5_ipool_free(priv->sh->ipool
7960                                 [MLX5_IPOOL_TUNNEL_ID], id);
7961                 mlx5_free(tunnel);
7962                 return NULL;
7963         }
7964         /* Initialize the new PMD tunnel. */
7965         memcpy(&tunnel->app_tunnel, app_tunnel, sizeof(*app_tunnel));
7966         tunnel->tunnel_id = id;
7967         tunnel->action.type = (typeof(tunnel->action.type))
7968                               MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET;
7969         tunnel->action.conf = tunnel;
7970         tunnel->item.type = (typeof(tunnel->item.type))
7971                             MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL;
7972         tunnel->item.spec = tunnel;
7973         tunnel->item.last = NULL;
7974         tunnel->item.mask = NULL;
7975
7976         DRV_LOG(DEBUG, "port %u new pmd tunnel id=0x%x",
7977                 dev->data->port_id, tunnel->tunnel_id);
7978
7979         return tunnel;
7980 }
7981
7982 static int
7983 mlx5_get_flow_tunnel(struct rte_eth_dev *dev,
7984                      const struct rte_flow_tunnel *app_tunnel,
7985                      struct mlx5_flow_tunnel **tunnel)
7986 {
7987         int ret = 0; /* Stays 0 when a new tunnel is allocated successfully. */
7988         struct mlx5_flow_tunnel_hub *thub = mlx5_tunnel_hub(dev);
7989         struct mlx5_flow_tunnel *tun;
7990
7991         LIST_FOREACH(tun, &thub->tunnels, chain) {
7992                 if (!memcmp(app_tunnel, &tun->app_tunnel,
7993                             sizeof(*app_tunnel))) {
7994                         *tunnel = tun;
7995                         ret = 0;
7996                         break;
7997                 }
7998         }
7999         if (!tun) {
8000                 tun = mlx5_flow_tunnel_allocate(dev, app_tunnel);
8001                 if (tun) {
8002                         LIST_INSERT_HEAD(&thub->tunnels, tun, chain);
8003                         *tunnel = tun;
8004                 } else {
8005                         ret = -ENOMEM;
8006                 }
8007         }
8008         if (tun)
8009                 __atomic_add_fetch(&tun->refctn, 1, __ATOMIC_RELAXED);
8010
8011         return ret;
8012 }
8013
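/*
 * Illustrative sketch, application side: tunnel objects above are created on
 * demand when a tunnel is registered for offload, and identical
 * rte_flow_tunnel descriptions share a single refcounted PMD tunnel. The
 * VXLAN parameters below are assumptions for the example.
 *
 *   struct rte_flow_tunnel app_tunnel = {
 *           .type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *           .tun_id = 42,
 *   };
 *   struct rte_flow_action *pmd_actions;
 *   uint32_t num_of_actions;
 *   struct rte_flow_error error;
 *   int ret = rte_flow_tunnel_decap_set(port_id, &app_tunnel, &pmd_actions,
 *                                       &num_of_actions, &error);
 */
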
8014 void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id)
8015 {
8016         struct mlx5_flow_tunnel_hub *thub = sh->tunnel_hub;
8017
8018         if (!thub)
8019                 return;
8020         if (!LIST_EMPTY(&thub->tunnels))
8021                 DRV_LOG(WARNING, "port %u tunnels present", port_id);
8022         mlx5_hlist_destroy(thub->groups);
8023         mlx5_free(thub);
8024 }
8025
8026 int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh)
8027 {
8028         int err;
8029         struct mlx5_flow_tunnel_hub *thub;
8030
8031         thub = mlx5_malloc(MLX5_MEM_SYS | MLX5_MEM_ZERO, sizeof(*thub),
8032                            0, SOCKET_ID_ANY);
8033         if (!thub)
8034                 return -ENOMEM;
8035         LIST_INIT(&thub->tunnels);
8036         thub->groups = mlx5_hlist_create("flow groups", MLX5_MAX_TABLES, 0,
8037                                          0, NULL, NULL, NULL);
8038         if (!thub->groups) {
8039                 err = -rte_errno;
8040                 goto err;
8041         }
8042         sh->tunnel_hub = thub;
8043
8044         return 0;
8045
8046 err:
8047         if (thub->groups)
8048                 mlx5_hlist_destroy(thub->groups);
8049         mlx5_free(thub); /* thub is known to be non-NULL here. */
8051         return err;
8052 }
8053
8054 #ifndef HAVE_MLX5DV_DR
8055 #define MLX5_DOMAIN_SYNC_FLOW ((1 << 0) | (1 << 1))
8056 #else
8057 #define MLX5_DOMAIN_SYNC_FLOW \
8058         (MLX5DV_DR_DOMAIN_SYNC_FLAGS_SW | MLX5DV_DR_DOMAIN_SYNC_FLAGS_HW)
8059 #endif
8060
8061 int rte_pmd_mlx5_sync_flow(uint16_t port_id, uint32_t domains)
8062 {
8063         struct rte_eth_dev *dev = &rte_eth_devices[port_id];
8064         const struct mlx5_flow_driver_ops *fops;
8065         int ret;
8066         struct rte_flow_attr attr = { .transfer = 0 };
8067
8068         fops = flow_get_drv_ops(flow_get_drv_type(dev, &attr));
8069         ret = fops->sync_domain(dev, domains, MLX5_DOMAIN_SYNC_FLOW);
8070         if (ret > 0)
8071                 ret = -ret;
8072         return ret;
8073 }
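
/*
 * Illustrative sketch: an application selects domains with the bit masks
 * from rte_pmd_mlx5.h and requests that the steering entries of those
 * domains be synchronized to the hardware. "port_id" is an assumption
 * for the example.
 *
 *   int ret = rte_pmd_mlx5_sync_flow(port_id,
 *                                    MLX5_DOMAIN_BIT_NIC_RX |
 *                                    MLX5_DOMAIN_BIT_NIC_TX);
 *   if (ret < 0)
 *           printf("flow sync failed: %s\n", rte_strerror(-ret));
 */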