net/mlx5: optimize flow RSS struct
drivers/net/mlx5/mlx5_flow.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <stdbool.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include <mlx5_glue.h>
#include <mlx5_devx_cmds.h>
#include <mlx5_prm.h>

#include "mlx5_defs.h"
#include "mlx5.h"
#include "mlx5_flow.h"
#include "mlx5_rxtx.h"

/* Dev ops structures defined in mlx5.c. */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};

enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
        MLX5_EXPANSION_ROOT_ETH_VLAN,
        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
        MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
        MLX5_EXPANSION_OUTER_IPV4_TCP,
        MLX5_EXPANSION_OUTER_IPV6,
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
        MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
        MLX5_EXPANSION_IPV4_TCP,
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                 MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
                                                 MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6,
                                                 MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP,
                         MLX5_EXPANSION_IPV4,
                         MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                 MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                 MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                 MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                 MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
};

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
        .query = mlx5_flow_query,
        .dev_dump = mlx5_flow_dev_dump,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
        struct rte_flow_attr attr;
        struct rte_flow_item items[4];
        struct rte_flow_item_eth l2;
        struct rte_flow_item_eth l2_mask;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3_mask;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4_mask;
        struct rte_flow_action actions[2];
        struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
        { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
        { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
        { 9, 10, 11 }, { 12, 13, 14 },
};

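/*
 * Example: with 16 Verbs priorities (priority_map_5), a flow with base
 * priority 1 and item sub-priority 2 is mapped to Verbs priority 5,
 * i.e. priority_map_5[1][2].
 */
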
/* Tunnel information. */
struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
        uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GENEVE,
                .ptype = RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GRE,
                .ptype = RTE_PTYPE_TUNNEL_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_NVGRE,
                .ptype = RTE_PTYPE_TUNNEL_NVGRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPIP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_IPV6_ENCAP,
                .ptype = RTE_PTYPE_TUNNEL_IP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GTP,
                .ptype = RTE_PTYPE_TUNNEL_GTPU,
        },
};

/**
 * Translate tag ID to register.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] feature
 *   The feature that requests the register.
 * @param[in] id
 *   The requested register ID.
 * @param[out] error
 *   Error description in case of failure.
 *
 * @return
 *   The requested register on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
                     enum mlx5_feature_name feature,
                     uint32_t id,
                     struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;
        enum modify_reg start_reg;
        bool skip_mtr_reg = false;

        switch (feature) {
        case MLX5_HAIRPIN_RX:
                return REG_B;
        case MLX5_HAIRPIN_TX:
                return REG_A;
        case MLX5_METADATA_RX:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_B;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_METADATA_TX:
                return REG_A;
        case MLX5_METADATA_FDB:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NONE;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_0;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_1;
                }
                break;
        case MLX5_FLOW_MARK:
                switch (config->dv_xmeta_en) {
                case MLX5_XMETA_MODE_LEGACY:
                        return REG_NONE;
                case MLX5_XMETA_MODE_META16:
                        return REG_C_1;
                case MLX5_XMETA_MODE_META32:
                        return REG_C_0;
                }
                break;
        case MLX5_MTR_SFX:
                /*
                 * If meter color and flow match share one register, flow match
                 * should use the meter color register for match.
                 */
                if (priv->mtr_reg_share)
                        return priv->mtr_color_reg;
                else
                        return priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                               REG_C_3;
        case MLX5_MTR_COLOR:
                MLX5_ASSERT(priv->mtr_color_reg != REG_NONE);
                return priv->mtr_color_reg;
        case MLX5_COPY_MARK:
                /*
                 * The metadata COPY_MARK register is used only in the meter
                 * suffix sub-flow when a meter is present. It's safe to share
                 * the same register.
                 */
                return priv->mtr_color_reg != REG_C_2 ? REG_C_2 : REG_C_3;
        case MLX5_APP_TAG:
                /*
                 * If a meter is enabled, it engages the registers for color
                 * match and flow match. If meter color match does not use
                 * REG_C_2, the REG_C_x used by meter color match must be
                 * skipped.
                 * If no meter is enabled, all available registers are free
                 * to use.
                 */
                start_reg = priv->mtr_color_reg != REG_C_2 ? REG_C_2 :
                            (priv->mtr_reg_share ? REG_C_3 : REG_C_4);
                skip_mtr_reg = !!(priv->mtr_en && start_reg == REG_C_2);
                if (id > (REG_C_7 - start_reg))
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "invalid tag id");
                if (config->flow_mreg_c[id + start_reg - REG_C_0] == REG_NONE)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                /*
                 * This case means the meter is using a REG_C_x greater than
                 * REG_C_2. Take care not to conflict with the meter color
                 * REG_C_x. If the available index REG_C_y >= REG_C_x, skip
                 * the color register.
                 */
                if (skip_mtr_reg && config->flow_mreg_c
                    [id + start_reg - REG_C_0] >= priv->mtr_color_reg) {
                        if (config->flow_mreg_c
                            [id + 1 + start_reg - REG_C_0] != REG_NONE)
                                return config->flow_mreg_c
                                               [id + 1 + start_reg - REG_C_0];
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "unsupported tag id");
                }
                return config->flow_mreg_c[id + start_reg - REG_C_0];
        }
        MLX5_ASSERT(false);
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL, "invalid feature name");
}

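/*
 * Illustration (assuming no meter is configured, i.e. priv->mtr_color_reg
 * is REG_NONE and priv->mtr_en is 0): mlx5_flow_get_reg_id(dev,
 * MLX5_APP_TAG, 0, &error) computes start_reg = REG_C_2 and returns
 * config->flow_mreg_c[2], the first REG_C_x available for application
 * tags.
 */
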
/**
 * Check extensive flow metadata register support.
 *
 * @param dev
 *   Pointer to rte_eth_dev structure.
 *
 * @return
 *   True if device supports extensive flow metadata register, otherwise false.
 */
bool
mlx5_flow_ext_mreg_supported(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_dev_config *config = &priv->config;

        /*
         * Having an available reg_c can be regarded inclusively as supporting
         * extensive flow metadata registers, which means:
         * - metadata register copy action by modify header.
         * - 16 modify header actions are supported.
         * - reg_c's are preserved across different domains (FDB and NIC) on
         *   packet loopback by flow lookup miss.
         */
        return config->flow_mreg_c[2] != REG_NONE;
}

/**
 * Discover the maximum number of priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct {
                struct ibv_flow_attr attr;
                struct ibv_flow_spec_eth eth;
                struct ibv_flow_spec_action_drop drop;
        } flow_attr = {
                .attr = {
                        .num_of_specs = 2,
                        .port = (uint8_t)priv->ibv_port,
                },
                .eth = {
                        .type = IBV_FLOW_SPEC_ETH,
                        .size = sizeof(struct ibv_flow_spec_eth),
                },
                .drop = {
                        .size = sizeof(struct ibv_flow_spec_action_drop),
                        .type = IBV_FLOW_SPEC_ACTION_DROP,
                },
        };
        struct ibv_flow *flow;
        struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
        uint16_t vprio[] = { 8, 16 };
        int i;
        int priority = 0;

        if (!drop) {
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
        for (i = 0; i != RTE_DIM(vprio); i++) {
                flow_attr.attr.priority = vprio[i] - 1;
                flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
                if (!flow)
                        break;
                claim_zero(mlx5_glue->destroy_flow(flow));
                priority = vprio[i];
        }
        mlx5_hrxq_drop_release(dev);
        switch (priority) {
        case 8:
                priority = RTE_DIM(priority_map_3);
                break;
        case 16:
                priority = RTE_DIM(priority_map_5);
                break;
        default:
                rte_errno = ENOTSUP;
                DRV_LOG(ERR,
                        "port %u verbs maximum priority: %d expected 8/16",
                        dev->data->port_id, priority);
                return -rte_errno;
        }
        DRV_LOG(INFO, "port %u flow maximum priority: %d",
                dev->data->port_id, priority);
        return priority;
}

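/*
 * The probe above works by creating a drop flow at the highest Verbs
 * priority of each candidate range (7 for 8 levels, 15 for 16 levels);
 * the largest range whose creation succeeds selects the priority map.
 */
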
/**
 * Adjust flow priority based on the highest layer and the requested priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
                                   uint32_t subpriority)
{
        uint32_t res = 0;
        struct mlx5_priv *priv = dev->data->dev_private;

        switch (priv->config.flow_prio) {
        case RTE_DIM(priority_map_3):
                res = priority_map_3[priority][subpriority];
                break;
        case RTE_DIM(priority_map_5):
                res = priority_map_5[priority][subpriority];
                break;
        }
        return res;
}

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering the fields supported by the NIC, to compare with the
 *   user mask.
 * @param[in] size
 *   Size of the bit-masks, in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
                          struct rte_flow_error *error)
{
        unsigned int i;

        MLX5_ASSERT(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "mask enables non supported"
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                unsigned int i;
                int ret;

                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
                        last[i] = ((const uint8_t *)item->last)[i] & mask[i];
                }
                ret = memcmp(spec, last, size);
                if (ret != 0)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "range is not valid");
        }
        return 0;
}

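/*
 * Example: if @p nic_mask covers only the MAC address fields of an ETH
 * item and the user mask additionally enables the EtherType field, the
 * loop above fails with ENOTSUP ("mask enables non supported bits").
 */
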
/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] rss_desc
 *   Pointer to the RSS descriptor of the flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
{
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        int rss_request_inner = rss_desc->level >= 2;

        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
                hash_fields |= IBV_RX_HASH_INNER;
        else if (tunnel || rss_request_inner)
                return 0;
#endif
        /* Check if requested layer matches RSS hash fields. */
        if (!(rss_desc->types & layer_types))
                return 0;
        return hash_fields;
}

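/*
 * Example: for the inner IPv4 layer of a tunnel flow with RSS level 2,
 * IBV_RX_HASH_INNER is OR-ed into the IPv4 hash fields; if the RSS types
 * then do not include any IPv4 type, 0 is returned and this layer does
 * not participate in the hash.
 */
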
/**
 * Look up and set the ptype in the Rx data part. Only a single ptype can be
 * used; if several tunnel rules are applied on this queue, the tunnel ptype
 * is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        unsigned int i;
        uint32_t tunnel_ptype = 0;

        /* Look up for the ptype to use. */
        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
                if (!rxq_ctrl->flow_tunnels_n[i])
                        continue;
                if (!tunnel_ptype) {
                        tunnel_ptype = tunnels_info[i].ptype;
                } else {
                        tunnel_ptype = 0;
                        break;
                }
        }
        rxq_ctrl->rxq.tunnel = tunnel_ptype;
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_handle
 *   Pointer to device flow handle structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
                       struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                /*
                 * To support metadata register copy on Tx loopback,
                 * this must be always enabled (metadata may arrive
                 * from another port - not from local flows only).
                 */
                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n++;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_set(dev, dev_handle);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_handle
 *   Pointer to the device flow handle structure.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev,
                        struct mlx5_flow_handle *dev_handle)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const int mark = dev_handle->mark;
        const int tunnel = !!(dev_handle->layers & MLX5_FLOW_LAYER_TUNNEL);
        struct mlx5_hrxq *hrxq;
        unsigned int i;

        if (dev_handle->fate_action != MLX5_FLOW_FATE_QUEUE)
                return;
        hrxq = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_HRXQ],
                              dev_handle->rix_hrxq);
        if (!hrxq)
                return;
        MLX5_ASSERT(dev->data->dev_started);
        for (i = 0; i != hrxq->ind_table->queues_n; ++i) {
                int idx = hrxq->ind_table->queues[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (priv->config.dv_flow_en &&
                    priv->config.dv_xmeta_en != MLX5_XMETA_MODE_LEGACY &&
                    mlx5_flow_ext_mreg_supported(dev)) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n = 1;
                } else if (mark) {
                        rxq_ctrl->flow_mark_n--;
                        rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_handle->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t handle_idx;
        struct mlx5_flow_handle *dev_handle;

        SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
                       handle_idx, dev_handle, next)
                flow_drv_rxq_flags_trim(dev, dev_handle);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl;
                unsigned int j;

                if (!(*priv->rxqs)[i])
                        continue;
                rxq_ctrl = container_of((*priv->rxqs)[i],
                                        struct mlx5_rxq_ctrl, rxq);
                rxq_ctrl->flow_mark_n = 0;
                rxq_ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq_ctrl->flow_tunnels_n[j] = 0;
                rxq_ctrl->rxq.tunnel = 0;
        }
}

/*
 * Return a pointer to the desired action in the list of actions.
 *
 * @param[in] actions
 *   The list of actions to search the action in.
 * @param[in] action
 *   The action to find.
 *
 * @return
 *   Pointer to the action in the list, if found. NULL otherwise.
 */
const struct rte_flow_action *
mlx5_flow_find_action(const struct rte_flow_action *actions,
                      enum rte_flow_action_type action)
{
        if (actions == NULL)
                return NULL;
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
                if (actions->type == action)
                        return actions;
        return NULL;
}

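/*
 * Usage sketch: mlx5_flow_find_action(actions, RTE_FLOW_ACTION_TYPE_RSS)
 * returns the first RSS action of the list, or NULL when the flow has no
 * RSS action.
 */
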
/*
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't mark and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 flag"
                                          " actions in same flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "flag action not supported for "
                                          "egress");
        return 0;
}

/*
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
                               uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        const struct rte_flow_action_mark *mark = action->conf;

        if (!mark)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          action,
                                          "configuration cannot be null");
        if (mark->id >= MLX5_FLOW_MARK_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &mark->id,
                                          "mark id must be in 0 <= id < "
                                          RTE_STR(MLX5_FLOW_MARK_MAX));
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't flag and mark in same flow");
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 mark actions in same"
                                          " flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "mark action not supported for "
                                          "egress");
        return 0;
}

/*
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags __rte_unused,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "drop action not supported for "
                                          "egress");
        return 0;
}

/*
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                uint64_t action_flags,
                                struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_queue *queue = action->conf;

        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
        if (!priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No Rx queues configured");
        if (queue->index >= priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue index out of range");
        if (!(*priv->rxqs)[queue->index])
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue is not configured");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "queue action not supported for "
                                          "egress");
        return 0;
}

/*
 * Validate the RSS action.
 *
 * @param[in] action
 *   Pointer to the RSS action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[in] item_flags
 *   Items that were detected.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                              uint64_t action_flags,
                              struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
        int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;

        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions"
                                          " in same flow");
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->func,
                                          "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        if (rss->level > 2)
#else
        if (rss->level > 1)
#endif
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->level,
                                          "tunnel RSS is not supported");
        /* Allow RSS key_len 0 in case of NULL (default) RSS key. */
        if (rss->key_len == 0 && rss->key != NULL)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key length 0");
        if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too small");
        if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too large");
        if (rss->queue_num > priv->config.ind_table_max_size)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->queue_num,
                                          "number of queues too large");
        if (rss->types & MLX5_RSS_HF_MASK)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
        if ((rss->types & (ETH_RSS_L3_SRC_ONLY | ETH_RSS_L3_DST_ONLY)) &&
            !(rss->types & ETH_RSS_IP))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L3 partial RSS requested but L3 RSS"
                                          " type not specified");
        if ((rss->types & (ETH_RSS_L4_SRC_ONLY | ETH_RSS_L4_DST_ONLY)) &&
            !(rss->types & (ETH_RSS_UDP | ETH_RSS_TCP)))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "L4 partial RSS requested but L4 RSS"
                                          " type not specified");
        if (!priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No Rx queues configured");
        if (!rss->queue_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          NULL, "No queues configured");
        for (i = 0; i != rss->queue_num; ++i) {
                if (rss->queue[i] >= priv->rxqs_n)
                        return rte_flow_error_set
                                (error, EINVAL,
                                 RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                 &rss->queue[i], "queue index out of range");
                if (!(*priv->rxqs)[rss->queue[i]])
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                 &rss->queue[i], "queue is not configured");
        }
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "rss action not supported for "
                                          "egress");
        if (rss->level > 1 && !tunnel)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF, NULL,
                                          "inner RSS is not supported for "
                                          "non-tunnel flows");
        return 0;
}

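/*
 * Example of a rejected configuration: requesting rss->level == 2 (inner
 * RSS) on a flow without any tunnel item fails above with "inner RSS is
 * not supported for non-tunnel flows".
 */
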
/*
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "count action not supported for "
                                          "egress");
        return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attributes,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL, "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL, "priority out of range");
        if (attributes->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
        if (attributes->transfer && !priv->config.dv_esw_en)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
        if (!attributes->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "ingress attribute is mandatory");
        return 0;
}

/**
 * Validate ICMP6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The protocol matched by the previous layer, 0xFF if not specified.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
                               uint64_t item_flags,
                               uint8_t target_protocol,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item_icmp6 *mask = item->mask;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                      MLX5_FLOW_LAYER_OUTER_L3_IPV6;
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;

        if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMPV6)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with ICMP6 layer");
        if (!(item_flags & l3m))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "IPv6 is mandatory to filter on"
                                          " ICMP6");
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L4 layers not supported");
        if (!mask)
                mask = &rte_flow_item_icmp6_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_icmp6_mask,
                 sizeof(struct rte_flow_item_icmp6), error);
        if (ret < 0)
                return ret;
        return 0;
}

1334 /**
1335  * Validate ICMP item.
1336  *
1337  * @param[in] item
1338  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1341  * @param[out] error
1342  *   Pointer to error structure.
1343  *
1344  * @return
1345  *   0 on success, a negative errno value otherwise and rte_errno is set.
1346  */
1347 int
1348 mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
1349                              uint64_t item_flags,
1350                              uint8_t target_protocol,
1351                              struct rte_flow_error *error)
1352 {
1353         const struct rte_flow_item_icmp *mask = item->mask;
1354         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1355         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1356                                       MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1357         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1358                                       MLX5_FLOW_LAYER_OUTER_L4;
1359         int ret;
1360
1361         if (target_protocol != 0xFF && target_protocol != IPPROTO_ICMP)
1362                 return rte_flow_error_set(error, EINVAL,
1363                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1364                                           "protocol filtering not compatible"
1365                                           " with ICMP layer");
1366         if (!(item_flags & l3m))
1367                 return rte_flow_error_set(error, EINVAL,
1368                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1369                                           "IPv4 is mandatory to filter"
1370                                           " on ICMP");
1371         if (item_flags & l4m)
1372                 return rte_flow_error_set(error, EINVAL,
1373                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1374                                           "multiple L4 layers not supported");
1375         if (!mask)
1376                 mask = &rte_flow_item_icmp_mask;
1377         ret = mlx5_flow_item_acceptable
1378                 (item, (const uint8_t *)mask,
1379                  (const uint8_t *)&rte_flow_item_icmp_mask,
1380                  sizeof(struct rte_flow_item_icmp), error);
1381         if (ret < 0)
1382                 return ret;
1383         return 0;
1384 }
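
/*
 * Illustrative sketch (not part of the driver): the checks above
 * require an IPv4 layer before ICMP and forbid any other L4 item,
 * so a minimal valid pattern is ordered as follows.
 *
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_ICMP },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */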
1385
1386 /**
1387  * Validate Ethernet item.
1388  *
1389  * @param[in] item
1390  *   Item specification.
1391  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1393  * @param[out] error
1394  *   Pointer to error structure.
1395  *
1396  * @return
1397  *   0 on success, a negative errno value otherwise and rte_errno is set.
1398  */
1399 int
1400 mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
1401                             uint64_t item_flags,
1402                             struct rte_flow_error *error)
1403 {
1404         const struct rte_flow_item_eth *mask = item->mask;
1405         const struct rte_flow_item_eth nic_mask = {
1406                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1407                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1408                 .type = RTE_BE16(0xffff),
1409         };
1410         int ret;
1411         int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1412         const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1413                                        MLX5_FLOW_LAYER_OUTER_L2;
1414
1415         if (item_flags & ethm)
1416                 return rte_flow_error_set(error, ENOTSUP,
1417                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1418                                           "multiple L2 layers not supported");
1419         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_L3)) ||
1420             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_L3)))
1421                 return rte_flow_error_set(error, EINVAL,
1422                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1423                                           "L2 layer should not follow "
1424                                           "L3 layers");
1425         if ((!tunnel && (item_flags & MLX5_FLOW_LAYER_OUTER_VLAN)) ||
1426             (tunnel && (item_flags & MLX5_FLOW_LAYER_INNER_VLAN)))
1427                 return rte_flow_error_set(error, EINVAL,
1428                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1429                                           "L2 layer should not follow VLAN");
1430         if (!mask)
1431                 mask = &rte_flow_item_eth_mask;
1432         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1433                                         (const uint8_t *)&nic_mask,
1434                                         sizeof(struct rte_flow_item_eth),
1435                                         error);
1436         return ret;
1437 }
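
/*
 * Illustrative sketch (not part of the driver): a user mask is only
 * accepted if every bit it sets is also set in nic_mask above, e.g.
 * matching on the destination MAC address alone.
 *
 *     const struct rte_flow_item_eth eth_spec = {
 *             .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
 *     };
 *     const struct rte_flow_item_eth eth_mask = {
 *             .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *     };
 */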
1438
1439 /**
1440  * Validate VLAN item.
1441  *
1442  * @param[in] item
1443  *   Item specification.
1444  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1446  * @param[in] dev
1447  *   Ethernet device flow is being created on.
1448  * @param[out] error
1449  *   Pointer to error structure.
1450  *
1451  * @return
1452  *   0 on success, a negative errno value otherwise and rte_errno is set.
1453  */
1454 int
1455 mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
1456                              uint64_t item_flags,
1457                              struct rte_eth_dev *dev,
1458                              struct rte_flow_error *error)
1459 {
1460         const struct rte_flow_item_vlan *spec = item->spec;
1461         const struct rte_flow_item_vlan *mask = item->mask;
1462         const struct rte_flow_item_vlan nic_mask = {
1463                 .tci = RTE_BE16(UINT16_MAX),
1464                 .inner_type = RTE_BE16(UINT16_MAX),
1465         };
1466         uint16_t vlan_tag = 0;
1467         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1468         int ret;
1469         const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
1470                                         MLX5_FLOW_LAYER_INNER_L4) :
1471                                        (MLX5_FLOW_LAYER_OUTER_L3 |
1472                                         MLX5_FLOW_LAYER_OUTER_L4);
1473         const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1474                                         MLX5_FLOW_LAYER_OUTER_VLAN;
1475
1476         if (item_flags & vlanm)
1477                 return rte_flow_error_set(error, EINVAL,
1478                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1479                                           "multiple VLAN layers not supported");
1480         else if ((item_flags & l34m) != 0)
1481                 return rte_flow_error_set(error, EINVAL,
1482                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1483                                           "VLAN cannot follow L3/L4 layer");
1484         if (!mask)
1485                 mask = &rte_flow_item_vlan_mask;
1486         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1487                                         (const uint8_t *)&nic_mask,
1488                                         sizeof(struct rte_flow_item_vlan),
1489                                         error);
1490         if (ret)
1491                 return ret;
1492         if (!tunnel && mask->tci != RTE_BE16(0x0fff)) {
1493                 struct mlx5_priv *priv = dev->data->dev_private;
1494
1495                 if (priv->vmwa_context) {
                        /*
                         * A non-NULL context means we run in a virtual
                         * machine with SR-IOV enabled and must create a VLAN
                         * interface so that the hypervisor sets up the
                         * E-Switch vport context correctly. We avoid creating
                         * multiple VLAN interfaces, so a VLAN tag mask cannot
                         * be supported.
                         */
1503                         return rte_flow_error_set(error, EINVAL,
1504                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1505                                                   item,
1506                                                   "VLAN tag mask is not"
1507                                                   " supported in virtual"
1508                                                   " environment");
1509                 }
1510         }
1511         if (spec) {
1512                 vlan_tag = spec->tci;
1513                 vlan_tag &= mask->tci;
1514         }
1515         /*
1516          * From verbs perspective an empty VLAN is equivalent
1517          * to a packet without VLAN layer.
1518          */
1519         if (!vlan_tag)
1520                 return rte_flow_error_set(error, EINVAL,
1521                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
1522                                           item->spec,
1523                                           "VLAN cannot be empty");
1524         return 0;
1525 }
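
/*
 * Illustrative sketch (not part of the driver): an empty VLAN spec is
 * rejected above, so the item must carry a TCI that is non-zero after
 * masking, e.g. VLAN ID 100 with the full 12-bit VID mask.
 *
 *     const struct rte_flow_item_vlan vlan_spec = {
 *             .tci = RTE_BE16(100),
 *     };
 *     const struct rte_flow_item_vlan vlan_mask = {
 *             .tci = RTE_BE16(0x0fff),
 *     };
 */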
1526
1527 /**
1528  * Validate IPV4 item.
1529  *
1530  * @param[in] item
1531  *   Item specification.
1532  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
1537  * @param[out] error
1538  *   Pointer to error structure.
1539  *
1540  * @return
1541  *   0 on success, a negative errno value otherwise and rte_errno is set.
1542  */
1543 int
1544 mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
1545                              uint64_t item_flags,
1546                              uint64_t last_item,
1547                              uint16_t ether_type,
1548                              const struct rte_flow_item_ipv4 *acc_mask,
1549                              struct rte_flow_error *error)
1550 {
1551         const struct rte_flow_item_ipv4 *mask = item->mask;
1552         const struct rte_flow_item_ipv4 *spec = item->spec;
1553         const struct rte_flow_item_ipv4 nic_mask = {
1554                 .hdr = {
1555                         .src_addr = RTE_BE32(0xffffffff),
1556                         .dst_addr = RTE_BE32(0xffffffff),
1557                         .type_of_service = 0xff,
1558                         .next_proto_id = 0xff,
1559                 },
1560         };
1561         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1562         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1563                                       MLX5_FLOW_LAYER_OUTER_L3;
1564         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1565                                       MLX5_FLOW_LAYER_OUTER_L4;
1566         int ret;
1567         uint8_t next_proto = 0xFF;
1568         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
1569                                   MLX5_FLOW_LAYER_OUTER_VLAN |
1570                                   MLX5_FLOW_LAYER_INNER_VLAN);
1571
1572         if ((last_item & l2_vlan) && ether_type &&
1573             ether_type != RTE_ETHER_TYPE_IPV4)
1574                 return rte_flow_error_set(error, EINVAL,
1575                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1576                                           "IPv4 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv4");
1578         if (item_flags & MLX5_FLOW_LAYER_IPIP) {
1579                 if (mask && spec)
1580                         next_proto = mask->hdr.next_proto_id &
1581                                      spec->hdr.next_proto_id;
1582                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1583                         return rte_flow_error_set(error, EINVAL,
1584                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1585                                                   item,
1586                                                   "multiple tunnel "
1587                                                   "not supported");
1588         }
1589         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP)
1590                 return rte_flow_error_set(error, EINVAL,
1591                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1592                                           "wrong tunnel type - IPv6 specified "
1593                                           "but IPv4 item provided");
1594         if (item_flags & l3m)
1595                 return rte_flow_error_set(error, ENOTSUP,
1596                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1597                                           "multiple L3 layers not supported");
1598         else if (item_flags & l4m)
1599                 return rte_flow_error_set(error, EINVAL,
1600                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1601                                           "L3 cannot follow an L4 layer.");
1602         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
1603                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
1604                 return rte_flow_error_set(error, EINVAL,
1605                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1606                                           "L3 cannot follow an NVGRE layer.");
1607         if (!mask)
1608                 mask = &rte_flow_item_ipv4_mask;
1609         else if (mask->hdr.next_proto_id != 0 &&
1610                  mask->hdr.next_proto_id != 0xff)
1611                 return rte_flow_error_set(error, EINVAL,
1612                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
1613                                           "partial mask is not supported"
1614                                           " for protocol");
1615         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1616                                         acc_mask ? (const uint8_t *)acc_mask
1617                                                  : (const uint8_t *)&nic_mask,
1618                                         sizeof(struct rte_flow_item_ipv4),
1619                                         error);
1620         if (ret < 0)
1621                 return ret;
1622         return 0;
1623 }
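
/*
 * Illustrative sketch (not part of the driver): a partial protocol
 * mask is rejected above, so filtering on TCP over IPv4 must use the
 * full 0xff mask on next_proto_id.
 *
 *     const struct rte_flow_item_ipv4 ipv4_spec = {
 *             .hdr = { .next_proto_id = IPPROTO_TCP },
 *     };
 *     const struct rte_flow_item_ipv4 ipv4_mask = {
 *             .hdr = { .next_proto_id = 0xff },
 *     };
 */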
1624
1625 /**
1626  * Validate IPV6 item.
1627  *
1628  * @param[in] item
1629  *   Item specification.
1630  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] last_item
 *   Previous validated item in the pattern items.
 * @param[in] ether_type
 *   Type in the ethernet layer header (including dot1q).
 * @param[in] acc_mask
 *   Acceptable mask, if NULL the default internal mask
 *   will be used to check whether item fields are supported.
1635  * @param[out] error
1636  *   Pointer to error structure.
1637  *
1638  * @return
1639  *   0 on success, a negative errno value otherwise and rte_errno is set.
1640  */
1641 int
1642 mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
1643                              uint64_t item_flags,
1644                              uint64_t last_item,
1645                              uint16_t ether_type,
1646                              const struct rte_flow_item_ipv6 *acc_mask,
1647                              struct rte_flow_error *error)
1648 {
1649         const struct rte_flow_item_ipv6 *mask = item->mask;
1650         const struct rte_flow_item_ipv6 *spec = item->spec;
1651         const struct rte_flow_item_ipv6 nic_mask = {
1652                 .hdr = {
1653                         .src_addr =
1654                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
1655                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
1656                         .dst_addr =
1657                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
1658                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
1659                         .vtc_flow = RTE_BE32(0xffffffff),
1660                         .proto = 0xff,
1661                 },
1662         };
1663         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1664         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1665                                       MLX5_FLOW_LAYER_OUTER_L3;
1666         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1667                                       MLX5_FLOW_LAYER_OUTER_L4;
1668         int ret;
1669         uint8_t next_proto = 0xFF;
1670         const uint64_t l2_vlan = (MLX5_FLOW_LAYER_L2 |
1671                                   MLX5_FLOW_LAYER_OUTER_VLAN |
1672                                   MLX5_FLOW_LAYER_INNER_VLAN);
1673
1674         if ((last_item & l2_vlan) && ether_type &&
1675             ether_type != RTE_ETHER_TYPE_IPV6)
1676                 return rte_flow_error_set(error, EINVAL,
1677                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1678                                           "IPv6 cannot follow L2/VLAN layer "
                                          "whose ether type is not IPv6");
1680         if (item_flags & MLX5_FLOW_LAYER_IPV6_ENCAP) {
1681                 if (mask && spec)
1682                         next_proto = mask->hdr.proto & spec->hdr.proto;
1683                 if (next_proto == IPPROTO_IPIP || next_proto == IPPROTO_IPV6)
1684                         return rte_flow_error_set(error, EINVAL,
1685                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1686                                                   item,
1687                                                   "multiple tunnel "
1688                                                   "not supported");
1689         }
1690         if (item_flags & MLX5_FLOW_LAYER_IPIP)
1691                 return rte_flow_error_set(error, EINVAL,
1692                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1693                                           "wrong tunnel type - IPv4 specified "
1694                                           "but IPv6 item provided");
1695         if (item_flags & l3m)
1696                 return rte_flow_error_set(error, ENOTSUP,
1697                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1698                                           "multiple L3 layers not supported");
1699         else if (item_flags & l4m)
1700                 return rte_flow_error_set(error, EINVAL,
1701                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1702                                           "L3 cannot follow an L4 layer.");
1703         else if ((item_flags & MLX5_FLOW_LAYER_NVGRE) &&
1704                   !(item_flags & MLX5_FLOW_LAYER_INNER_L2))
1705                 return rte_flow_error_set(error, EINVAL,
1706                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1707                                           "L3 cannot follow an NVGRE layer.");
1708         if (!mask)
1709                 mask = &rte_flow_item_ipv6_mask;
1710         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
1711                                         acc_mask ? (const uint8_t *)acc_mask
1712                                                  : (const uint8_t *)&nic_mask,
1713                                         sizeof(struct rte_flow_item_ipv6),
1714                                         error);
1715         if (ret < 0)
1716                 return ret;
1717         return 0;
1718 }
1719
1720 /**
1721  * Validate UDP item.
1722  *
1723  * @param[in] item
1724  *   Item specification.
1725  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
1731  * @param[out] error
1732  *   Pointer to error structure.
1733  *
1734  * @return
1735  *   0 on success, a negative errno value otherwise and rte_errno is set.
1736  */
1737 int
1738 mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
1739                             uint64_t item_flags,
1740                             uint8_t target_protocol,
1741                             struct rte_flow_error *error)
1742 {
1743         const struct rte_flow_item_udp *mask = item->mask;
1744         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1745         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1746                                       MLX5_FLOW_LAYER_OUTER_L3;
1747         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1748                                       MLX5_FLOW_LAYER_OUTER_L4;
1749         int ret;
1750
1751         if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
1752                 return rte_flow_error_set(error, EINVAL,
1753                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1754                                           "protocol filtering not compatible"
1755                                           " with UDP layer");
1756         if (!(item_flags & l3m))
1757                 return rte_flow_error_set(error, EINVAL,
1758                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1759                                           "L3 is mandatory to filter on L4");
1760         if (item_flags & l4m)
1761                 return rte_flow_error_set(error, EINVAL,
1762                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1763                                           "multiple L4 layers not supported");
1764         if (!mask)
1765                 mask = &rte_flow_item_udp_mask;
1766         ret = mlx5_flow_item_acceptable
1767                 (item, (const uint8_t *)mask,
1768                  (const uint8_t *)&rte_flow_item_udp_mask,
1769                  sizeof(struct rte_flow_item_udp), error);
1770         if (ret < 0)
1771                 return ret;
1772         return 0;
1773 }
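
/*
 * Illustrative sketch (not part of the driver): an L3 item must
 * precede UDP and only a single L4 item may appear, e.g. matching
 * the VXLAN well-known destination port 4789.
 *
 *     const struct rte_flow_item_udp udp_spec = {
 *             .hdr = { .dst_port = RTE_BE16(4789) },
 *     };
 *     const struct rte_flow_item_udp udp_mask = {
 *             .hdr = { .dst_port = RTE_BE16(0xffff) },
 *     };
 */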
1774
1775 /**
1776  * Validate TCP item.
1777  *
1778  * @param[in] item
1779  *   Item specification.
1780  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (DV, verbs, etc.) supported header fields mask.
1784  * @param[out] error
1785  *   Pointer to error structure.
1786  *
1787  * @return
1788  *   0 on success, a negative errno value otherwise and rte_errno is set.
1789  */
1790 int
1791 mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
1792                             uint64_t item_flags,
1793                             uint8_t target_protocol,
1794                             const struct rte_flow_item_tcp *flow_mask,
1795                             struct rte_flow_error *error)
1796 {
1797         const struct rte_flow_item_tcp *mask = item->mask;
1798         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1799         const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
1800                                       MLX5_FLOW_LAYER_OUTER_L3;
1801         const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
1802                                       MLX5_FLOW_LAYER_OUTER_L4;
1803         int ret;
1804
1805         MLX5_ASSERT(flow_mask);
1806         if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
1807                 return rte_flow_error_set(error, EINVAL,
1808                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1809                                           "protocol filtering not compatible"
1810                                           " with TCP layer");
1811         if (!(item_flags & l3m))
1812                 return rte_flow_error_set(error, EINVAL,
1813                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1814                                           "L3 is mandatory to filter on L4");
1815         if (item_flags & l4m)
1816                 return rte_flow_error_set(error, EINVAL,
1817                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1818                                           "multiple L4 layers not supported");
1819         if (!mask)
1820                 mask = &rte_flow_item_tcp_mask;
1821         ret = mlx5_flow_item_acceptable
1822                 (item, (const uint8_t *)mask,
1823                  (const uint8_t *)flow_mask,
1824                  sizeof(struct rte_flow_item_tcp), error);
1825         if (ret < 0)
1826                 return ret;
1827         return 0;
1828 }
1829
1830 /**
1831  * Validate VXLAN item.
1832  *
1833  * @param[in] item
1834  *   Item specification.
1835  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
1839  * @param[out] error
1840  *   Pointer to error structure.
1841  *
1842  * @return
1843  *   0 on success, a negative errno value otherwise and rte_errno is set.
1844  */
1845 int
1846 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1847                               uint64_t item_flags,
1848                               struct rte_flow_error *error)
1849 {
1850         const struct rte_flow_item_vxlan *spec = item->spec;
1851         const struct rte_flow_item_vxlan *mask = item->mask;
1852         int ret;
1853         union vni {
1854                 uint32_t vlan_id;
1855                 uint8_t vni[4];
1856         } id = { .vlan_id = 0, };
1859         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1860                 return rte_flow_error_set(error, ENOTSUP,
1861                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1862                                           "multiple tunnel layers not"
1863                                           " supported");
1864         /*
         * Verify an outer UDP layer is present as required by
         * https://tools.ietf.org/html/rfc7348
1867          */
1868         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1869                 return rte_flow_error_set(error, EINVAL,
1870                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1871                                           "no outer UDP layer found");
1872         if (!mask)
1873                 mask = &rte_flow_item_vxlan_mask;
1874         ret = mlx5_flow_item_acceptable
1875                 (item, (const uint8_t *)mask,
1876                  (const uint8_t *)&rte_flow_item_vxlan_mask,
1877                  sizeof(struct rte_flow_item_vxlan),
1878                  error);
1879         if (ret < 0)
1880                 return ret;
1881         if (spec) {
                /* Build the masked VNI; copying the mask over the spec
                 * would discard the spec bytes.
                 */
                memcpy(&id.vni[1], spec->vni, 3);
                id.vni[1] &= mask->vni[0];
                id.vni[2] &= mask->vni[1];
                id.vni[3] &= mask->vni[2];
1884         }
1885         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1886                 return rte_flow_error_set(error, ENOTSUP,
1887                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1888                                           "VXLAN tunnel must be fully defined");
1889         return 0;
1890 }
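
/*
 * Illustrative sketch (not part of the driver): a VXLAN item is only
 * accepted when the outer part of the tunnel is fully defined down to
 * the UDP layer.
 *
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *             { .type = RTE_FLOW_ITEM_TYPE_VXLAN },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */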
1891
1892 /**
1893  * Validate VXLAN_GPE item.
1894  *
1895  * @param[in] item
1896  *   Item specification.
1897  * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
1903  * @param[out] error
1904  *   Pointer to error structure.
1905  *
1906  * @return
1907  *   0 on success, a negative errno value otherwise and rte_errno is set.
1908  */
1909 int
1910 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1911                                   uint64_t item_flags,
1912                                   struct rte_eth_dev *dev,
1913                                   struct rte_flow_error *error)
1914 {
1915         struct mlx5_priv *priv = dev->data->dev_private;
1916         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1917         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1918         int ret;
1919         union vni {
1920                 uint32_t vlan_id;
1921                 uint8_t vni[4];
1922         } id = { .vlan_id = 0, };
1923
1924         if (!priv->config.l3_vxlan_en)
1925                 return rte_flow_error_set(error, ENOTSUP,
1926                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1927                                           "L3 VXLAN is not enabled by device"
1928                                           " parameter and/or not configured in"
1929                                           " firmware");
1930         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1931                 return rte_flow_error_set(error, ENOTSUP,
1932                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1933                                           "multiple tunnel layers not"
1934                                           " supported");
1935         /*
         * Verify an outer UDP layer is present as required by the
         * VXLAN-GPE specification (draft-ietf-nvo3-vxlan-gpe).
1938          */
1939         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1940                 return rte_flow_error_set(error, EINVAL,
1941                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1942                                           "no outer UDP layer found");
1943         if (!mask)
1944                 mask = &rte_flow_item_vxlan_gpe_mask;
1945         ret = mlx5_flow_item_acceptable
1946                 (item, (const uint8_t *)mask,
1947                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1948                  sizeof(struct rte_flow_item_vxlan_gpe),
1949                  error);
1950         if (ret < 0)
1951                 return ret;
1952         if (spec) {
1953                 if (spec->protocol)
1954                         return rte_flow_error_set(error, ENOTSUP,
1955                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1956                                                   item,
1957                                                   "VxLAN-GPE protocol"
1958                                                   " not supported");
                /* Build the masked VNI; copying the mask over the spec
                 * would discard the spec bytes.
                 */
                memcpy(&id.vni[1], spec->vni, 3);
                id.vni[1] &= mask->vni[0];
                id.vni[2] &= mask->vni[1];
                id.vni[3] &= mask->vni[2];
1961         }
1962         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1963                 return rte_flow_error_set(error, ENOTSUP,
1964                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1965                                           "VXLAN-GPE tunnel must be fully"
1966                                           " defined");
1967         return 0;
1968 }

/**
1970  * Validate GRE Key item.
1971  *
1972  * @param[in] item
1973  *   Item specification.
1974  * @param[in] item_flags
1975  *   Bit flags to mark detected items.
1976  * @param[in] gre_item
 *   Pointer to the GRE item that precedes this GRE key.
1978  * @param[out] error
1979  *   Pointer to error structure.
1980  *
1981  * @return
1982  *   0 on success, a negative errno value otherwise and rte_errno is set.
1983  */
1984 int
1985 mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
1986                                 uint64_t item_flags,
1987                                 const struct rte_flow_item *gre_item,
1988                                 struct rte_flow_error *error)
1989 {
1990         const rte_be32_t *mask = item->mask;
1991         int ret = 0;
1992         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
1993         const struct rte_flow_item_gre *gre_spec;
1994         const struct rte_flow_item_gre *gre_mask;
1995
1996         if (item_flags & MLX5_FLOW_LAYER_GRE_KEY)
1997                 return rte_flow_error_set(error, ENOTSUP,
1998                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Multiple GRE keys not supported");
2000         if (!(item_flags & MLX5_FLOW_LAYER_GRE))
2001                 return rte_flow_error_set(error, ENOTSUP,
2002                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2003                                           "No preceding GRE header");
2004         if (item_flags & MLX5_FLOW_LAYER_INNER)
2005                 return rte_flow_error_set(error, ENOTSUP,
2006                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2007                                           "GRE key following a wrong item");
2008         gre_mask = gre_item->mask;
2009         if (!gre_mask)
2010                 gre_mask = &rte_flow_item_gre_mask;
2011         gre_spec = gre_item->spec;
2012         if (gre_spec && (gre_mask->c_rsvd0_ver & RTE_BE16(0x2000)) &&
2013                          !(gre_spec->c_rsvd0_ver & RTE_BE16(0x2000)))
2014                 return rte_flow_error_set(error, EINVAL,
2015                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2016                                           "Key bit must be on");
2017
2018         if (!mask)
2019                 mask = &gre_key_default_mask;
2020         ret = mlx5_flow_item_acceptable
2021                 (item, (const uint8_t *)mask,
2022                  (const uint8_t *)&gre_key_default_mask,
2023                  sizeof(rte_be32_t), error);
2024         return ret;
2025 }
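
/*
 * Illustrative sketch (not part of the driver): a GRE key item must
 * follow a GRE item whose spec keeps the K bit (0x2000 in
 * c_rsvd0_ver) set, with the key itself given as a big-endian word.
 *
 *     const struct rte_flow_item_gre gre_spec = {
 *             .c_rsvd0_ver = RTE_BE16(0x2000),
 *     };
 *     const rte_be32_t gre_key = RTE_BE32(0x1234);
 *
 * The pattern then carries RTE_FLOW_ITEM_TYPE_GRE_KEY with
 * spec = &gre_key right after the GRE item.
 */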
2026
2027 /**
2028  * Validate GRE item.
2029  *
2030  * @param[in] item
2031  *   Item specification.
2032  * @param[in] item_flags
2033  *   Bit flags to mark detected items.
2034  * @param[in] target_protocol
2035  *   The next protocol in the previous item.
2036  * @param[out] error
2037  *   Pointer to error structure.
2038  *
2039  * @return
2040  *   0 on success, a negative errno value otherwise and rte_errno is set.
2041  */
2042 int
2043 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
2044                             uint64_t item_flags,
2045                             uint8_t target_protocol,
2046                             struct rte_flow_error *error)
2047 {
2048         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
2049         const struct rte_flow_item_gre *mask = item->mask;
2050         int ret;
2051         const struct rte_flow_item_gre nic_mask = {
2052                 .c_rsvd0_ver = RTE_BE16(0xB000),
2053                 .protocol = RTE_BE16(UINT16_MAX),
2054         };
2055
2056         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2057                 return rte_flow_error_set(error, EINVAL,
2058                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2059                                           "protocol filtering not compatible"
2060                                           " with this GRE layer");
2061         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2062                 return rte_flow_error_set(error, ENOTSUP,
2063                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2064                                           "multiple tunnel layers not"
2065                                           " supported");
2066         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2067                 return rte_flow_error_set(error, ENOTSUP,
2068                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2069                                           "L3 Layer is missing");
2070         if (!mask)
2071                 mask = &rte_flow_item_gre_mask;
2072         ret = mlx5_flow_item_acceptable
2073                 (item, (const uint8_t *)mask,
2074                  (const uint8_t *)&nic_mask,
2075                  sizeof(struct rte_flow_item_gre), error);
2076         if (ret < 0)
2077                 return ret;
2078 #ifndef HAVE_MLX5DV_DR
2079 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
2080         if (spec && (spec->protocol & mask->protocol))
2081                 return rte_flow_error_set(error, ENOTSUP,
2082                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2083                                           "without MPLS support the"
2084                                           " specification cannot be used for"
2085                                           " filtering");
2086 #endif
2087 #endif
2088         return 0;
2089 }
2090
2091 /**
2092  * Validate Geneve item.
2093  *
2094  * @param[in] item
2095  *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
2100  * @param[out] error
2101  *   Pointer to error structure.
2102  *
2103  * @return
2104  *   0 on success, a negative errno value otherwise and rte_errno is set.
2105  */
2107 int
2108 mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2109                                uint64_t item_flags,
2110                                struct rte_eth_dev *dev,
2111                                struct rte_flow_error *error)
2112 {
2113         struct mlx5_priv *priv = dev->data->dev_private;
2114         const struct rte_flow_item_geneve *spec = item->spec;
2115         const struct rte_flow_item_geneve *mask = item->mask;
2116         int ret;
2117         uint16_t gbhdr;
2118         uint8_t opt_len = priv->config.hca_attr.geneve_max_opt_len ?
2119                           MLX5_GENEVE_OPT_LEN_1 : MLX5_GENEVE_OPT_LEN_0;
2120         const struct rte_flow_item_geneve nic_mask = {
2121                 .ver_opt_len_o_c_rsvd0 = RTE_BE16(0x3f80),
2122                 .vni = "\xff\xff\xff",
2123                 .protocol = RTE_BE16(UINT16_MAX),
2124         };
2125
2126         if (!priv->config.hca_attr.tunnel_stateless_geneve_rx)
2127                 return rte_flow_error_set(error, ENOTSUP,
2128                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "Geneve is not enabled by device"
2130                                           " parameter and/or not configured in"
2131                                           " firmware");
2132         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2133                 return rte_flow_error_set(error, ENOTSUP,
2134                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2135                                           "multiple tunnel layers not"
2136                                           " supported");
2137         /*
         * Verify an outer UDP layer is present as required by the
         * Geneve specification (draft-ietf-nvo3-geneve).
2140          */
2141         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
2142                 return rte_flow_error_set(error, EINVAL,
2143                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2144                                           "no outer UDP layer found");
2145         if (!mask)
2146                 mask = &rte_flow_item_geneve_mask;
2147         ret = mlx5_flow_item_acceptable
2148                                   (item, (const uint8_t *)mask,
2149                                    (const uint8_t *)&nic_mask,
2150                                    sizeof(struct rte_flow_item_geneve), error);
2151         if (ret)
2152                 return ret;
2153         if (spec) {
2154                 gbhdr = rte_be_to_cpu_16(spec->ver_opt_len_o_c_rsvd0);
2155                 if (MLX5_GENEVE_VER_VAL(gbhdr) ||
2156                      MLX5_GENEVE_CRITO_VAL(gbhdr) ||
2157                      MLX5_GENEVE_RSVD_VAL(gbhdr) || spec->rsvd1)
2158                         return rte_flow_error_set(error, ENOTSUP,
2159                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2160                                                   item,
                                                  "unsupported Geneve header"
2162                                                   " fields are being used");
2163                 if (MLX5_GENEVE_OPTLEN_VAL(gbhdr) > opt_len)
2164                         return rte_flow_error_set
2165                                         (error, ENOTSUP,
2166                                          RTE_FLOW_ERROR_TYPE_ITEM,
2167                                          item,
2168                                          "Unsupported Geneve options length");
2169         }
2170         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
2171                 return rte_flow_error_set
2172                                     (error, ENOTSUP,
2173                                      RTE_FLOW_ERROR_TYPE_ITEM, item,
2174                                      "Geneve tunnel must be fully defined");
2175         return 0;
2176 }
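
/*
 * Illustrative sketch (not part of the driver): only version 0
 * headers without critical options and within the supported options
 * length are accepted, so a minimal Geneve spec matches the VNI only.
 *
 *     const struct rte_flow_item_geneve geneve_spec = {
 *             .vni = "\x00\x12\x34",
 *     };
 *     const struct rte_flow_item_geneve geneve_mask = {
 *             .vni = "\xff\xff\xff",
 *     };
 */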
2177
2178 /**
2179  * Validate MPLS item.
2180  *
2181  * @param[in] dev
2182  *   Pointer to the rte_eth_dev structure.
2183  * @param[in] item
2184  *   Item specification.
2185  * @param[in] item_flags
2186  *   Bit-fields that holds the items detected until now.
2187  * @param[in] prev_layer
2188  *   The protocol layer indicated in previous item.
2189  * @param[out] error
2190  *   Pointer to error structure.
2191  *
2192  * @return
2193  *   0 on success, a negative errno value otherwise and rte_errno is set.
2194  */
2195 int
2196 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
2197                              const struct rte_flow_item *item __rte_unused,
2198                              uint64_t item_flags __rte_unused,
2199                              uint64_t prev_layer __rte_unused,
2200                              struct rte_flow_error *error)
2201 {
2202 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
2203         const struct rte_flow_item_mpls *mask = item->mask;
2204         struct mlx5_priv *priv = dev->data->dev_private;
2205         int ret;
2206
2207         if (!priv->config.mpls_en)
2208                 return rte_flow_error_set(error, ENOTSUP,
2209                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2210                                           "MPLS not supported or"
2211                                           " disabled in firmware"
2212                                           " configuration.");
2213         /* MPLS over IP, UDP, GRE is allowed */
2214         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
2215                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
2216                             MLX5_FLOW_LAYER_GRE)))
2217                 return rte_flow_error_set(error, EINVAL,
2218                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2219                                           "protocol filtering not compatible"
2220                                           " with MPLS layer");
2221         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
2222         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
2223             !(item_flags & MLX5_FLOW_LAYER_GRE))
2224                 return rte_flow_error_set(error, ENOTSUP,
2225                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2226                                           "multiple tunnel layers not"
2227                                           " supported");
2228         if (!mask)
2229                 mask = &rte_flow_item_mpls_mask;
2230         ret = mlx5_flow_item_acceptable
2231                 (item, (const uint8_t *)mask,
2232                  (const uint8_t *)&rte_flow_item_mpls_mask,
2233                  sizeof(struct rte_flow_item_mpls), error);
2234         if (ret < 0)
2235                 return ret;
2236         return 0;
2237 #endif
2238         return rte_flow_error_set(error, ENOTSUP,
2239                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
2240                                   "MPLS is not supported by Verbs, please"
2241                                   " update.");
2242 }
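
/*
 * Illustrative sketch (not part of the driver): MPLS is accepted over
 * IP, UDP or GRE, making MPLS-over-GRE the one tunnel-in-tunnel
 * combination that passes the checks above.
 *
 *     const struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_GRE },
 *             { .type = RTE_FLOW_ITEM_TYPE_MPLS },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 */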
2243
2244 /**
2245  * Validate NVGRE item.
2246  *
2247  * @param[in] item
2248  *   Item specification.
2249  * @param[in] item_flags
2250  *   Bit flags to mark detected items.
2251  * @param[in] target_protocol
2252  *   The next protocol in the previous item.
2253  * @param[out] error
2254  *   Pointer to error structure.
2255  *
2256  * @return
2257  *   0 on success, a negative errno value otherwise and rte_errno is set.
2258  */
2259 int
2260 mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2261                               uint64_t item_flags,
2262                               uint8_t target_protocol,
2263                               struct rte_flow_error *error)
2264 {
2265         const struct rte_flow_item_nvgre *mask = item->mask;
2266         int ret;
2267
2268         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
2269                 return rte_flow_error_set(error, EINVAL,
2270                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2271                                           "protocol filtering not compatible"
2272                                           " with this GRE layer");
2273         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
2274                 return rte_flow_error_set(error, ENOTSUP,
2275                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2276                                           "multiple tunnel layers not"
2277                                           " supported");
2278         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
2279                 return rte_flow_error_set(error, ENOTSUP,
2280                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
2281                                           "L3 Layer is missing");
2282         if (!mask)
2283                 mask = &rte_flow_item_nvgre_mask;
2284         ret = mlx5_flow_item_acceptable
2285                 (item, (const uint8_t *)mask,
2286                  (const uint8_t *)&rte_flow_item_nvgre_mask,
2287                  sizeof(struct rte_flow_item_nvgre), error);
2288         if (ret < 0)
2289                 return ret;
2290         return 0;
2291 }
2292
2293 /* Allocate unique ID for the split Q/RSS subflows. */
2294 static uint32_t
2295 flow_qrss_get_id(struct rte_eth_dev *dev)
2296 {
2297         struct mlx5_priv *priv = dev->data->dev_private;
2298         uint32_t qrss_id, ret;
2299
2300         ret = mlx5_flow_id_get(priv->qrss_id_pool, &qrss_id);
2301         if (ret)
2302                 return 0;
2303         MLX5_ASSERT(qrss_id);
2304         return qrss_id;
2305 }
2306
2307 /* Free unique ID for the split Q/RSS subflows. */
2308 static void
flow_qrss_free_id(struct rte_eth_dev *dev, uint32_t qrss_id)
2310 {
2311         struct mlx5_priv *priv = dev->data->dev_private;
2312
2313         if (qrss_id)
2314                 mlx5_flow_id_release(priv->qrss_id_pool, qrss_id);
2315 }
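
/*
 * Illustrative sketch (not part of the driver): flow_qrss_get_id()
 * returns 0 on failure, so callers treat 0 as "no ID" and must
 * release every identifier they obtained.
 *
 *     uint32_t id = flow_qrss_get_id(dev);
 *
 *     if (!id)
 *             goto error;
 *     ...
 *     flow_qrss_free_id(dev, id);
 */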
2316
2317 /**
 * Release resources related to the QUEUE/RSS action split.
2319  *
2320  * @param dev
2321  *   Pointer to Ethernet device.
2322  * @param flow
 *   Flow to release split flow IDs from.
2324  */
2325 static void
2326 flow_mreg_split_qrss_release(struct rte_eth_dev *dev,
2327                              struct rte_flow *flow)
2328 {
2329         struct mlx5_priv *priv = dev->data->dev_private;
2330         uint32_t handle_idx;
2331         struct mlx5_flow_handle *dev_handle;
2332
2333         SILIST_FOREACH(priv->sh->ipool[MLX5_IPOOL_MLX5_FLOW], flow->dev_handles,
2334                        handle_idx, dev_handle, next)
2335                 if (dev_handle->split_flow_id)
2336                         flow_qrss_free_id(dev, dev_handle->split_flow_id);
2337 }
2338
2339 static int
2340 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
2341                    const struct rte_flow_attr *attr __rte_unused,
2342                    const struct rte_flow_item items[] __rte_unused,
2343                    const struct rte_flow_action actions[] __rte_unused,
2344                    bool external __rte_unused,
2345                    struct rte_flow_error *error)
2346 {
2347         return rte_flow_error_set(error, ENOTSUP,
2348                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2349 }
2350
2351 static struct mlx5_flow *
2352 flow_null_prepare(struct rte_eth_dev *dev __rte_unused,
2353                   const struct rte_flow_attr *attr __rte_unused,
2354                   const struct rte_flow_item items[] __rte_unused,
2355                   const struct rte_flow_action actions[] __rte_unused,
2356                   struct rte_flow_error *error)
2357 {
2358         rte_flow_error_set(error, ENOTSUP,
2359                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2360         return NULL;
2361 }
2362
2363 static int
2364 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
2365                     struct mlx5_flow *dev_flow __rte_unused,
2366                     const struct rte_flow_attr *attr __rte_unused,
2367                     const struct rte_flow_item items[] __rte_unused,
2368                     const struct rte_flow_action actions[] __rte_unused,
2369                     struct rte_flow_error *error)
2370 {
2371         return rte_flow_error_set(error, ENOTSUP,
2372                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2373 }
2374
2375 static int
2376 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
2377                 struct rte_flow *flow __rte_unused,
2378                 struct rte_flow_error *error)
2379 {
2380         return rte_flow_error_set(error, ENOTSUP,
2381                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2382 }
2383
2384 static void
2385 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
2386                  struct rte_flow *flow __rte_unused)
2387 {
2388 }
2389
2390 static void
2391 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
2392                   struct rte_flow *flow __rte_unused)
2393 {
2394 }
2395
2396 static int
2397 flow_null_query(struct rte_eth_dev *dev __rte_unused,
2398                 struct rte_flow *flow __rte_unused,
2399                 const struct rte_flow_action *actions __rte_unused,
2400                 void *data __rte_unused,
2401                 struct rte_flow_error *error)
2402 {
2403         return rte_flow_error_set(error, ENOTSUP,
2404                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL, NULL);
2405 }
2406
2407 /* Void driver to protect from null pointer reference. */
2408 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
2409         .validate = flow_null_validate,
2410         .prepare = flow_null_prepare,
2411         .translate = flow_null_translate,
2412         .apply = flow_null_apply,
2413         .remove = flow_null_remove,
2414         .destroy = flow_null_destroy,
2415         .query = flow_null_query,
2416 };
2417
2418 /**
2419  * Select flow driver type according to flow attributes and device
2420  * configuration.
2421  *
2422  * @param[in] dev
2423  *   Pointer to the dev structure.
2424  * @param[in] attr
2425  *   Pointer to the flow attributes.
2426  *
2427  * @return
 *   Selected flow driver type, MLX5_FLOW_TYPE_MAX if none applies.
2429  */
2430 static enum mlx5_flow_drv_type
2431 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
2432 {
2433         struct mlx5_priv *priv = dev->data->dev_private;
2434         enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
2435
2436         if (attr->transfer && priv->config.dv_esw_en)
2437                 type = MLX5_FLOW_TYPE_DV;
2438         if (!attr->transfer)
2439                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
2440                                                  MLX5_FLOW_TYPE_VERBS;
2441         return type;
2442 }
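
/*
 * Illustrative sketch (not part of the driver): with dv_flow_en set,
 * a plain ingress rule selects the DV driver, while a transfer rule
 * on a device without dv_esw_en keeps MLX5_FLOW_TYPE_MAX and is
 * rejected by the null driver ops.
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     enum mlx5_flow_drv_type type = flow_get_drv_type(dev, &attr);
 *
 * Here type == MLX5_FLOW_TYPE_DV when priv->config.dv_flow_en != 0.
 */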
2443
2444 #define flow_get_drv_ops(type) flow_drv_ops[type]
2445
2446 /**
2447  * Flow driver validation API. This abstracts calling driver specific functions.
2448  * The type of flow driver is determined according to flow attributes.
2449  *
2450  * @param[in] dev
2451  *   Pointer to the dev structure.
2452  * @param[in] attr
2453  *   Pointer to the flow attributes.
2454  * @param[in] items
2455  *   Pointer to the list of items.
2456  * @param[in] actions
2457  *   Pointer to the list of actions.
2458  * @param[in] external
 *   This flow rule is created by a request external to the PMD.
2460  * @param[out] error
2461  *   Pointer to the error structure.
2462  *
2463  * @return
2464  *   0 on success, a negative errno value otherwise and rte_errno is set.
2465  */
2466 static inline int
2467 flow_drv_validate(struct rte_eth_dev *dev,
2468                   const struct rte_flow_attr *attr,
2469                   const struct rte_flow_item items[],
2470                   const struct rte_flow_action actions[],
2471                   bool external, struct rte_flow_error *error)
2472 {
2473         const struct mlx5_flow_driver_ops *fops;
2474         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
2475
2476         fops = flow_get_drv_ops(type);
2477         return fops->validate(dev, attr, items, actions, external, error);
2478 }
2479
2480 /**
2481  * Flow driver preparation API. This abstracts calling driver specific
2482  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2483  * calculates the size of memory required for device flow, allocates the memory,
2484  * initializes the device flow and returns the pointer.
2485  *
2486  * @note
 *   This function initializes the device flow structure, such as dv or
 *   verbs in struct mlx5_flow. However, it is the caller's responsibility
 *   to initialize the rest. For example, adding the returned device flow
 *   to the flow->dev_flow list and setting the backward reference to the
 *   flow should be done outside of this function. The layers field is
 *   not filled either.
2492  *
2493  * @param[in] dev
 *   Pointer to the dev structure.
 * @param[in] flow
 *   Parent flow structure pointer.
2495  * @param[in] attr
2496  *   Pointer to the flow attributes.
2497  * @param[in] items
2498  *   Pointer to the list of items.
2499  * @param[in] actions
2500  *   Pointer to the list of actions.
2501  * @param[out] error
2502  *   Pointer to the error structure.
2503  *
2504  * @return
2505  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
2506  */
2507 static inline struct mlx5_flow *
2508 flow_drv_prepare(struct rte_eth_dev *dev,
2509                  const struct rte_flow *flow,
2510                  const struct rte_flow_attr *attr,
2511                  const struct rte_flow_item items[],
2512                  const struct rte_flow_action actions[],
2513                  struct rte_flow_error *error)
2514 {
2515         const struct mlx5_flow_driver_ops *fops;
2516         enum mlx5_flow_drv_type type = flow->drv_type;
2517
2518         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2519         fops = flow_get_drv_ops(type);
2520         return fops->prepare(dev, attr, items, actions, error);
2521 }
2522
2523 /**
2524  * Flow driver translation API. This abstracts calling driver specific
2525  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
2526  * translates a generic flow into a driver flow. flow_drv_prepare() must
2527  * precede.
2528  *
2529  * @note
2530  *   dev_flow->layers could be filled as a result of parsing during translation
2531  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
2532  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
2533  *   flow->actions could be overwritten even though all the expanded dev_flows
2534  *   have the same actions.
2535  *
2536  * @param[in] dev
2537  *   Pointer to the rte dev structure.
2538  * @param[in, out] dev_flow
2539  *   Pointer to the mlx5 flow.
2540  * @param[in] attr
2541  *   Pointer to the flow attributes.
2542  * @param[in] items
2543  *   Pointer to the list of items.
2544  * @param[in] actions
2545  *   Pointer to the list of actions.
2546  * @param[out] error
2547  *   Pointer to the error structure.
2548  *
2549  * @return
2550  *   0 on success, a negative errno value otherwise and rte_errno is set.
2551  */
2552 static inline int
2553 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
2554                    const struct rte_flow_attr *attr,
2555                    const struct rte_flow_item items[],
2556                    const struct rte_flow_action actions[],
2557                    struct rte_flow_error *error)
2558 {
2559         const struct mlx5_flow_driver_ops *fops;
2560         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
2561
2562         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2563         fops = flow_get_drv_ops(type);
2564         return fops->translate(dev, dev_flow, attr, items, actions, error);
2565 }
2566
2567 /**
2568  * Flow driver apply API. This abstracts calling driver specific functions.
2569  * Parent flow (rte_flow) should have driver type (drv_type). It applies the
2570  * translated driver flows onto the device. flow_drv_translate() must precede.
2571  *
2572  * @param[in] dev
2573  *   Pointer to Ethernet device structure.
2574  * @param[in, out] flow
2575  *   Pointer to flow structure.
2576  * @param[out] error
2577  *   Pointer to error structure.
2578  *
2579  * @return
2580  *   0 on success, a negative errno value otherwise and rte_errno is set.
2581  */
2582 static inline int
2583 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2584                struct rte_flow_error *error)
2585 {
2586         const struct mlx5_flow_driver_ops *fops;
2587         enum mlx5_flow_drv_type type = flow->drv_type;
2588
2589         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2590         fops = flow_get_drv_ops(type);
2591         return fops->apply(dev, flow, error);
2592 }
2593
2594 /**
2595  * Flow driver remove API. This abstracts calling driver specific functions.
2596  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2597  * from the device. All the resources of the flow must be freed by calling
2598  * flow_drv_destroy().
2599  *
2600  * @param[in] dev
2601  *   Pointer to Ethernet device.
2602  * @param[in, out] flow
2603  *   Pointer to flow structure.
2604  */
2605 static inline void
2606 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2607 {
2608         const struct mlx5_flow_driver_ops *fops;
2609         enum mlx5_flow_drv_type type = flow->drv_type;
2610
2611         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2612         fops = flow_get_drv_ops(type);
2613         fops->remove(dev, flow);
2614 }
2615
2616 /**
2617  * Flow driver destroy API. This abstracts calling driver specific functions.
2618  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
2619  * from the device and releases its resources.
2620  *
2621  * @param[in] dev
2622  *   Pointer to Ethernet device.
2623  * @param[in, out] flow
2624  *   Pointer to flow structure.
2625  */
2626 static inline void
2627 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2628 {
2629         const struct mlx5_flow_driver_ops *fops;
2630         enum mlx5_flow_drv_type type = flow->drv_type;
2631
2632         flow_mreg_split_qrss_release(dev, flow);
2633         MLX5_ASSERT(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
2634         fops = flow_get_drv_ops(type);
2635         fops->destroy(dev, flow);
2636 }
2637
2638 /**
2639  * Validate a flow supported by the NIC.
2640  *
2641  * @see rte_flow_validate()
2642  * @see rte_flow_ops
2643  */
2644 int
2645 mlx5_flow_validate(struct rte_eth_dev *dev,
2646                    const struct rte_flow_attr *attr,
2647                    const struct rte_flow_item items[],
2648                    const struct rte_flow_action actions[],
2649                    struct rte_flow_error *error)
2650 {
2651         int ret;
2652
2653         ret = flow_drv_validate(dev, attr, items, actions, true, error);
2654         if (ret < 0)
2655                 return ret;
2656         return 0;
2657 }
2658
2659 /**
2660  * Get RSS action from the action list.
2661  *
2662  * @param[in] actions
2663  *   Pointer to the list of actions.
2664  *
2665  * @return
2666  *   Pointer to the RSS action if it exists, NULL otherwise.
2667  */
2668 static const struct rte_flow_action_rss*
2669 flow_get_rss_action(const struct rte_flow_action actions[])
2670 {
2671         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2672                 switch (actions->type) {
2673                 case RTE_FLOW_ACTION_TYPE_RSS:
2674                         return (const struct rte_flow_action_rss *)
2675                                actions->conf;
2676                 default:
2677                         break;
2678                 }
2679         }
2680         return NULL;
2681 }
2682
2683 static unsigned int
2684 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2685 {
2686         const struct rte_flow_item *item;
2687         unsigned int has_vlan = 0;
2688
2689         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2690                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2691                         has_vlan = 1;
2692                         break;
2693                 }
2694         }
2695         if (has_vlan)
2696                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2697                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2698         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2699                                MLX5_EXPANSION_ROOT_OUTER;
2700 }
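/*
 * Illustrative usage sketch, assuming the rte_flow_expand_rss() helper and
 * expansion buffer used elsewhere in this file: the returned value selects
 * the entry node of the mlx5_support_expansion graph.
 *
 *     unsigned int graph_root = find_graph_root(items, rss->level);
 *     ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
 *                               items, rss->types,
 *                               mlx5_support_expansion, graph_root);
 */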
2701
2702 /**
2703  *  Get layer flags from the prefix flow.
2704  *
2705  *  Some flows may be split into several subflows: the prefix subflow gets
2706  *  the match items and the suffix subflow gets the actions.
2707  *  Some actions need the user-defined match item flags to get the details
2708  *  for the action.
2709  *  This function helps the suffix flow to get the item layer flags from the
2710  *  prefix subflow.
2711  *
2712  * @param[in] dev_flow
2713  *   Pointer to the created prefix subflow.
2714  *
2715  * @return
2716  *   The layers obtained from the prefix subflow.
2717  */
2718 static inline uint64_t
2719 flow_get_prefix_layer_flags(struct mlx5_flow *dev_flow)
2720 {
2721         uint64_t layers = 0;
2722
2723         /*
2724          * The layer bits could be cached in a local variable, but usually
2725          * the compiler optimizes these accesses well enough on its own.
2726          * If there is no decap action, use the layers directly.
2727          */
2728         if (!(dev_flow->act_flags & MLX5_FLOW_ACTION_DECAP))
2729                 return dev_flow->handle->layers;
2730         /* Convert L3 layers with decap action. */
2731         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2732                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2733         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2734                 layers |= MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2735         /* Convert L4 layers with decap action.  */
2736         if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_TCP)
2737                 layers |= MLX5_FLOW_LAYER_OUTER_L4_TCP;
2738         else if (dev_flow->handle->layers & MLX5_FLOW_LAYER_INNER_L4_UDP)
2739                 layers |= MLX5_FLOW_LAYER_OUTER_L4_UDP;
2740         return layers;
2741 }
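/*
 * Example: a prefix subflow that matched inner IPv4/UDP under a decap
 * action reports MLX5_FLOW_LAYER_OUTER_L3_IPV4 |
 * MLX5_FLOW_LAYER_OUTER_L4_UDP here, because after decapsulation the
 * former inner headers become the outermost headers the suffix subflow sees.
 */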
2742
2743 /**
2744  * Get metadata split action information.
2745  *
2746  * @param[in] actions
2747  *   Pointer to the list of actions.
2750  * @param[out] qrss
2751  *   Pointer to store the detected QUEUE/RSS action; left untouched if no
2752  *   QUEUE/RSS action is found.
2753  * @param[out] encap_idx
2754  *   Pointer to the index of the encap action if one exists, otherwise the
2755  *   index of the last (END) action.
2756  *
2757  * @return
2758  *   Total number of actions.
2759  */
2760 static int
2761 flow_parse_metadata_split_actions_info(const struct rte_flow_action actions[],
2762                                        const struct rte_flow_action **qrss,
2763                                        int *encap_idx)
2764 {
2765         const struct rte_flow_action_raw_encap *raw_encap;
2766         int actions_n = 0;
2767         int raw_decap_idx = -1;
2768
2769         *encap_idx = -1;
2770         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2771                 switch (actions->type) {
2772                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2773                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2774                         *encap_idx = actions_n;
2775                         break;
2776                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2777                         raw_decap_idx = actions_n;
2778                         break;
2779                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2780                         raw_encap = actions->conf;
2781                         if (raw_encap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
2782                                 *encap_idx = raw_decap_idx != -1 ?
2783                                                       raw_decap_idx : actions_n;
2784                         break;
2785                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2786                 case RTE_FLOW_ACTION_TYPE_RSS:
2787                         *qrss = actions;
2788                         break;
2789                 default:
2790                         break;
2791                 }
2792                 actions_n++;
2793         }
2794         if (*encap_idx == -1)
2795                 *encap_idx = actions_n;
2796         /* Count RTE_FLOW_ACTION_TYPE_END. */
2797         return actions_n + 1;
2798 }
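/*
 * Example: for actions = { RAW_DECAP, RAW_ENCAP (size above the decision
 * size), QUEUE, END }, *encap_idx is set to 0 (the raw decap starting the
 * decap/encap pair), *qrss points at the QUEUE action and the return
 * value is 4 (three actions plus the terminating END).
 */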
2799
2800 /**
2801  * Check meter action from the action list.
2802  *
2803  * @param[in] actions
2804  *   Pointer to the list of actions.
2805  * @param[out] mtr
2806  *   Pointer to the meter existence flag.
2807  *
2808  * @return
2809  *   Total number of actions.
2810  */
2811 static int
2812 flow_check_meter_action(const struct rte_flow_action actions[], uint32_t *mtr)
2813 {
2814         int actions_n = 0;
2815
2816         MLX5_ASSERT(mtr);
2817         *mtr = 0;
2818         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2819                 switch (actions->type) {
2820                 case RTE_FLOW_ACTION_TYPE_METER:
2821                         *mtr = 1;
2822                         break;
2823                 default:
2824                         break;
2825                 }
2826                 actions_n++;
2827         }
2828         /* Count RTE_FLOW_ACTION_TYPE_END. */
2829         return actions_n + 1;
2830 }
2831
2832 /**
2833  * Check if the flow should be split due to hairpin.
2834  * The reason for the split is that current HW can't
2835  * support encap on Rx, so if a flow has an encap action
2836  * we move it to Tx.
2837  *
2838  * @param dev
2839  *   Pointer to Ethernet device.
2840  * @param[in] attr
2841  *   Flow rule attributes.
2842  * @param[in] actions
2843  *   Associated actions (list terminated by the END action).
2844  *
2845  * @return
2846  *   > 0 the number of actions if the flow should be split,
2847  *   0 when no split is required.
2848  */
2849 static int
2850 flow_check_hairpin_split(struct rte_eth_dev *dev,
2851                          const struct rte_flow_attr *attr,
2852                          const struct rte_flow_action actions[])
2853 {
2854         int queue_action = 0;
2855         int action_n = 0;
2856         int encap = 0;
2857         const struct rte_flow_action_queue *queue;
2858         const struct rte_flow_action_rss *rss;
2859         const struct rte_flow_action_raw_encap *raw_encap;
2860
2861         if (!attr->ingress)
2862                 return 0;
2863         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2864                 switch (actions->type) {
2865                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2866                         queue = actions->conf;
2867                         if (queue == NULL)
2868                                 return 0;
2869                         if (mlx5_rxq_get_type(dev, queue->index) !=
2870                             MLX5_RXQ_TYPE_HAIRPIN)
2871                                 return 0;
2872                         queue_action = 1;
2873                         action_n++;
2874                         break;
2875                 case RTE_FLOW_ACTION_TYPE_RSS:
2876                         rss = actions->conf;
2877                         if (rss == NULL || rss->queue_num == 0)
2878                                 return 0;
2879                         if (mlx5_rxq_get_type(dev, rss->queue[0]) !=
2880                             MLX5_RXQ_TYPE_HAIRPIN)
2881                                 return 0;
2882                         queue_action = 1;
2883                         action_n++;
2884                         break;
2885                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2886                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2887                         encap = 1;
2888                         action_n++;
2889                         break;
2890                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2891                         raw_encap = actions->conf;
2892                         if (raw_encap->size >
2893                             (sizeof(struct rte_flow_item_eth) +
2894                              sizeof(struct rte_flow_item_ipv4)))
2895                                 encap = 1;
2896                         action_n++;
2897                         break;
2898                 default:
2899                         action_n++;
2900                         break;
2901                 }
2902         }
2903         if (encap == 1 && queue_action)
2904                 return action_n;
2905         return 0;
2906 }
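/*
 * Example: an ingress flow with actions { VXLAN_ENCAP, QUEUE } targeting
 * a hairpin Rx queue returns 2 and will be split, while the same actions
 * targeting a regular Rx queue return 0 and the flow is created as-is.
 */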
2907
2908 /* Declare flow create/destroy prototype in advance. */
2909 static struct rte_flow *
2910 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2911                  const struct rte_flow_attr *attr,
2912                  const struct rte_flow_item items[],
2913                  const struct rte_flow_action actions[],
2914                  bool external, struct rte_flow_error *error);
2915
2916 static void
2917 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2918                   struct rte_flow *flow);
2919
2920 /**
2921  * Add a flow of copying flow metadata registers in RX_CP_TBL.
2922  *
2923  * As mark_id is unique, if there's already a registered flow for the mark_id,
2924  * return the existing resource after increasing its reference counter.
2925  * Otherwise, create the resource (mcp_res) and flow.
2926  *
2927  * Flow looks like,
2928  *   - If ingress port is ANY and reg_c[1] is mark_id,
2929  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
2930  *
2931  * For default flow (zero mark_id), flow is like,
2932  *   - If ingress port is ANY,
2933  *     reg_b := reg_c[0] and jump to RX_ACT_TBL.
2934  *
2935  * @param dev
2936  *   Pointer to Ethernet device.
2937  * @param mark_id
2938  *   ID of MARK action, zero means default flow for META.
2939  * @param[out] error
2940  *   Perform verbose error reporting if not NULL.
2941  *
2942  * @return
2943  *   Associated resource on success, NULL otherwise and rte_errno is set.
2944  */
2945 static struct mlx5_flow_mreg_copy_resource *
2946 flow_mreg_add_copy_action(struct rte_eth_dev *dev, uint32_t mark_id,
2947                           struct rte_flow_error *error)
2948 {
2949         struct mlx5_priv *priv = dev->data->dev_private;
2950         struct rte_flow_attr attr = {
2951                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
2952                 .ingress = 1,
2953         };
2954         struct mlx5_rte_flow_item_tag tag_spec = {
2955                 .data = mark_id,
2956         };
2957         struct rte_flow_item items[] = {
2958                 [1] = { .type = RTE_FLOW_ITEM_TYPE_END, },
2959         };
2960         struct rte_flow_action_mark ftag = {
2961                 .id = mark_id,
2962         };
2963         struct mlx5_flow_action_copy_mreg cp_mreg = {
2964                 .dst = REG_B,
2965                 .src = 0,
2966         };
2967         struct rte_flow_action_jump jump = {
2968                 .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
2969         };
2970         struct rte_flow_action actions[] = {
2971                 [3] = { .type = RTE_FLOW_ACTION_TYPE_END, },
2972         };
2973         struct mlx5_flow_mreg_copy_resource *mcp_res;
2974         uint32_t idx = 0;
2975         int ret;
2976
2977         /* Fill the register fields in the flow. */
2978         ret = mlx5_flow_get_reg_id(dev, MLX5_FLOW_MARK, 0, error);
2979         if (ret < 0)
2980                 return NULL;
2981         tag_spec.id = ret;
2982         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
2983         if (ret < 0)
2984                 return NULL;
2985         cp_mreg.src = ret;
2986         /* Check if already registered. */
2987         MLX5_ASSERT(priv->mreg_cp_tbl);
2988         mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl, mark_id);
2989         if (mcp_res) {
2990                 /* For non-default rule. */
2991                 if (mark_id != MLX5_DEFAULT_COPY_ID)
2992                         mcp_res->refcnt++;
2993                 MLX5_ASSERT(mark_id != MLX5_DEFAULT_COPY_ID ||
2994                             mcp_res->refcnt == 1);
2995                 return mcp_res;
2996         }
2997         /* Provide the full width of FLAG specific value. */
2998         if (mark_id == (priv->sh->dv_regc0_mask & MLX5_FLOW_MARK_DEFAULT))
2999                 tag_spec.data = MLX5_FLOW_MARK_DEFAULT;
3000         /* Build a new flow. */
3001         if (mark_id != MLX5_DEFAULT_COPY_ID) {
3002                 items[0] = (struct rte_flow_item){
3003                         .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3004                         .spec = &tag_spec,
3005                 };
3006                 items[1] = (struct rte_flow_item){
3007                         .type = RTE_FLOW_ITEM_TYPE_END,
3008                 };
3009                 actions[0] = (struct rte_flow_action){
3010                         .type = MLX5_RTE_FLOW_ACTION_TYPE_MARK,
3011                         .conf = &ftag,
3012                 };
3013                 actions[1] = (struct rte_flow_action){
3014                         .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3015                         .conf = &cp_mreg,
3016                 };
3017                 actions[2] = (struct rte_flow_action){
3018                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3019                         .conf = &jump,
3020                 };
3021                 actions[3] = (struct rte_flow_action){
3022                         .type = RTE_FLOW_ACTION_TYPE_END,
3023                 };
3024         } else {
3025                 /* Default rule, wildcard match. */
3026                 attr.priority = MLX5_FLOW_PRIO_RSVD;
3027                 items[0] = (struct rte_flow_item){
3028                         .type = RTE_FLOW_ITEM_TYPE_END,
3029                 };
3030                 actions[0] = (struct rte_flow_action){
3031                         .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3032                         .conf = &cp_mreg,
3033                 };
3034                 actions[1] = (struct rte_flow_action){
3035                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
3036                         .conf = &jump,
3037                 };
3038                 actions[2] = (struct rte_flow_action){
3039                         .type = RTE_FLOW_ACTION_TYPE_END,
3040                 };
3041         }
3042         /* Build a new entry. */
3043         mcp_res = mlx5_ipool_zmalloc(priv->sh->ipool[MLX5_IPOOL_MCP], &idx);
3044         if (!mcp_res) {
3045                 rte_errno = ENOMEM;
3046                 return NULL;
3047         }
3048         mcp_res->idx = idx;
3050         /*
3051          * The copy flows are not included in any list. They are
3052          * referenced from other flows and cannot be applied,
3053          * removed or deleted in arbitrary order by list traversal.
3054          */
3055         mcp_res->flow = flow_list_create(dev, NULL, &attr, items,
3056                                          actions, false, error);
3057         if (!mcp_res->flow)
3058                 goto error;
3059         mcp_res->refcnt++;
3060         mcp_res->hlist_ent.key = mark_id;
3061         ret = mlx5_hlist_insert(priv->mreg_cp_tbl,
3062                                 &mcp_res->hlist_ent);
3063         MLX5_ASSERT(!ret);
3064         if (ret)
3065                 goto error;
3066         return mcp_res;
3067 error:
3068         if (mcp_res->flow)
3069                 flow_list_destroy(dev, NULL, mcp_res->flow);
3070         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3071         return NULL;
3072 }
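/*
 * Illustrative shape of the created copy flow (the actual register names
 * depend on what mlx5_flow_get_reg_id() reports for the configuration):
 *
 *     group RX_CP_TBL, ingress
 *     pattern: TAG (reg_c[1]) == mark_id
 *     actions: MARK id mark_id / COPY_MREG reg_c[0] -> reg_b /
 *              JUMP to RX_ACT_TBL
 */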
3073
3074 /**
3075  * Release flow in RX_CP_TBL.
3076  *
3077  * @param dev
3078  *   Pointer to Ethernet device.
3079  * @param flow
3080  *   Parent flow for which copying is provided.
3081  */
3082 static void
3083 flow_mreg_del_copy_action(struct rte_eth_dev *dev,
3084                           struct rte_flow *flow)
3085 {
3086         struct mlx5_flow_mreg_copy_resource *mcp_res;
3087         struct mlx5_priv *priv = dev->data->dev_private;
3088
3089         if (!flow->rix_mreg_copy)
3090                 return;
3091         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3092                                  flow->rix_mreg_copy);
3093         if (!mcp_res || !priv->mreg_cp_tbl)
3094                 return;
3095         if (flow->copy_applied) {
3096                 MLX5_ASSERT(mcp_res->appcnt);
3097                 flow->copy_applied = 0;
3098                 --mcp_res->appcnt;
3099                 if (!mcp_res->appcnt)
3100                         flow_drv_remove(dev, mcp_res->flow);
3101         }
3102         /*
3103          * We do not check availability of metadata registers here,
3104          * because copy resources are not allocated in this case.
3105          */
3106         if (--mcp_res->refcnt)
3107                 return;
3108         MLX5_ASSERT(mcp_res->flow);
3109         flow_list_destroy(dev, NULL, mcp_res->flow);
3110         mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3111         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3112         flow->rix_mreg_copy = 0;
3113 }
3114
3115 /**
3116  * Start flow in RX_CP_TBL.
3117  *
3118  * @param dev
3119  *   Pointer to Ethernet device.
3120  * @param flow
3121  *   Parent flow for which copying is provided.
3122  *
3123  * @return
3124  *   0 on success, a negative errno value otherwise and rte_errno is set.
3125  */
3126 static int
3127 flow_mreg_start_copy_action(struct rte_eth_dev *dev,
3128                             struct rte_flow *flow)
3129 {
3130         struct mlx5_flow_mreg_copy_resource *mcp_res;
3131         struct mlx5_priv *priv = dev->data->dev_private;
3132         int ret;
3133
3134         if (!flow->rix_mreg_copy || flow->copy_applied)
3135                 return 0;
3136         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3137                                  flow->rix_mreg_copy);
3138         if (!mcp_res)
3139                 return 0;
3140         if (!mcp_res->appcnt) {
3141                 ret = flow_drv_apply(dev, mcp_res->flow, NULL);
3142                 if (ret)
3143                         return ret;
3144         }
3145         ++mcp_res->appcnt;
3146         flow->copy_applied = 1;
3147         return 0;
3148 }
3149
3150 /**
3151  * Stop flow in RX_CP_TBL.
3152  *
3153  * @param dev
3154  *   Pointer to Ethernet device.
3155  * @param flow
3156  *   Parent flow for which copying is provided.
3157  */
3158 static void
3159 flow_mreg_stop_copy_action(struct rte_eth_dev *dev,
3160                            struct rte_flow *flow)
3161 {
3162         struct mlx5_flow_mreg_copy_resource *mcp_res;
3163         struct mlx5_priv *priv = dev->data->dev_private;
3164
3165         if (!flow->rix_mreg_copy || !flow->copy_applied)
3166                 return;
3167         mcp_res = mlx5_ipool_get(priv->sh->ipool[MLX5_IPOOL_MCP],
3168                                  flow->rix_mreg_copy);
3169         if (!mcp_res)
3170                 return;
3171         MLX5_ASSERT(mcp_res->appcnt);
3172         --mcp_res->appcnt;
3173         flow->copy_applied = 0;
3174         if (!mcp_res->appcnt)
3175                 flow_drv_remove(dev, mcp_res->flow);
3176 }
3177
3178 /**
3179  * Remove the default copy action from RX_CP_TBL.
3180  *
3181  * @param dev
3182  *   Pointer to Ethernet device.
3183  */
3184 static void
3185 flow_mreg_del_default_copy_action(struct rte_eth_dev *dev)
3186 {
3187         struct mlx5_flow_mreg_copy_resource *mcp_res;
3188         struct mlx5_priv *priv = dev->data->dev_private;
3189
3190         /* Check if default flow is registered. */
3191         if (!priv->mreg_cp_tbl)
3192                 return;
3193         mcp_res = (void *)mlx5_hlist_lookup(priv->mreg_cp_tbl,
3194                                             MLX5_DEFAULT_COPY_ID);
3195         if (!mcp_res)
3196                 return;
3197         MLX5_ASSERT(mcp_res->flow);
3198         flow_list_destroy(dev, NULL, mcp_res->flow);
3199         mlx5_hlist_remove(priv->mreg_cp_tbl, &mcp_res->hlist_ent);
3200         mlx5_ipool_free(priv->sh->ipool[MLX5_IPOOL_MCP], mcp_res->idx);
3201 }
3202
3203 /**
3204  * Add the default copy action in RX_CP_TBL.
3205  *
3206  * @param dev
3207  *   Pointer to Ethernet device.
3208  * @param[out] error
3209  *   Perform verbose error reporting if not NULL.
3210  *
3211  * @return
3212  *   0 for success, negative value otherwise and rte_errno is set.
3213  */
3214 static int
3215 flow_mreg_add_default_copy_action(struct rte_eth_dev *dev,
3216                                   struct rte_flow_error *error)
3217 {
3218         struct mlx5_priv *priv = dev->data->dev_private;
3219         struct mlx5_flow_mreg_copy_resource *mcp_res;
3220
3221         /* Check whether extensive metadata feature is engaged. */
3222         if (!priv->config.dv_flow_en ||
3223             priv->config.dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3224             !mlx5_flow_ext_mreg_supported(dev) ||
3225             !priv->sh->dv_regc0_mask)
3226                 return 0;
3227         mcp_res = flow_mreg_add_copy_action(dev, MLX5_DEFAULT_COPY_ID, error);
3228         if (!mcp_res)
3229                 return -rte_errno;
3230         return 0;
3231 }
3232
3233 /**
3234  * Add a flow of copying flow metadata registers in RX_CP_TBL.
3235  *
3236  * All the flows having a Q/RSS action should be split by
3237  * flow_mreg_split_qrss_prep() to pass through RX_CP_TBL. A flow in the RX_CP_TBL
3238  * performs the following,
3239  *   - CQE->flow_tag := reg_c[1] (MARK)
3240  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3241  * As CQE's flow_tag is not a register, it can't be simply copied from reg_c[1]
3242  * but there should be a flow for each MARK ID set by the MARK action.
3243  *
3244  * For the aforementioned reason, if there's a MARK action in flow's action
3245  * list, a corresponding flow should be added to the RX_CP_TBL in order to copy
3246  * the MARK ID to CQE's flow_tag like,
3247  *   - If reg_c[1] is mark_id,
3248  *     flow_tag := mark_id, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3249  *
3250  * For SET_META action which stores value in reg_c[0], as the destination is
3251  * also a flow metadata register (reg_b), adding a default flow is enough. Zero
3252  * MARK ID means the default flow. The default flow looks like,
3253  *   - For all flow, reg_b := reg_c[0] and jump to RX_ACT_TBL.
3254  *
3255  * @param dev
3256  *   Pointer to Ethernet device.
3257  * @param flow
3258  *   Pointer to flow structure.
3259  * @param[in] actions
3260  *   Pointer to the list of actions.
3261  * @param[out] error
3262  *   Perform verbose error reporting if not NULL.
3263  *
3264  * @return
3265  *   0 on success, negative value otherwise and rte_errno is set.
3266  */
3267 static int
3268 flow_mreg_update_copy_table(struct rte_eth_dev *dev,
3269                             struct rte_flow *flow,
3270                             const struct rte_flow_action *actions,
3271                             struct rte_flow_error *error)
3272 {
3273         struct mlx5_priv *priv = dev->data->dev_private;
3274         struct mlx5_dev_config *config = &priv->config;
3275         struct mlx5_flow_mreg_copy_resource *mcp_res;
3276         const struct rte_flow_action_mark *mark;
3277
3278         /* Check whether extensive metadata feature is engaged. */
3279         if (!config->dv_flow_en ||
3280             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3281             !mlx5_flow_ext_mreg_supported(dev) ||
3282             !priv->sh->dv_regc0_mask)
3283                 return 0;
3284         /* Find MARK action. */
3285         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3286                 switch (actions->type) {
3287                 case RTE_FLOW_ACTION_TYPE_FLAG:
3288                         mcp_res = flow_mreg_add_copy_action
3289                                 (dev, MLX5_FLOW_MARK_DEFAULT, error);
3290                         if (!mcp_res)
3291                                 return -rte_errno;
3292                         flow->rix_mreg_copy = mcp_res->idx;
3293                         if (dev->data->dev_started) {
3294                                 mcp_res->appcnt++;
3295                                 flow->copy_applied = 1;
3296                         }
3297                         return 0;
3298                 case RTE_FLOW_ACTION_TYPE_MARK:
3299                         mark = (const struct rte_flow_action_mark *)
3300                                 actions->conf;
3301                         mcp_res =
3302                                 flow_mreg_add_copy_action(dev, mark->id, error);
3303                         if (!mcp_res)
3304                                 return -rte_errno;
3305                         flow->rix_mreg_copy = mcp_res->idx;
3306                         if (dev->data->dev_started) {
3307                                 mcp_res->appcnt++;
3308                                 flow->copy_applied = 1;
3309                         }
3310                         return 0;
3311                 default:
3312                         break;
3313                 }
3314         }
3315         return 0;
3316 }
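/*
 * Example: a flow with actions { MARK (id 0x1234), QUEUE, END } gets a
 * companion copy flow for mark_id 0x1234 registered in RX_CP_TBL via
 * flow_mreg_add_copy_action() above, and flow->rix_mreg_copy keeps its
 * index so the copy flow is released together with the parent flow.
 */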
3317
3318 #define MLX5_MAX_SPLIT_ACTIONS 24
3319 #define MLX5_MAX_SPLIT_ITEMS 24
3320
3321 /**
3322  * Split the hairpin flow.
3323  * Since HW can't support encap on Rx we move the encap to Tx.
3324  * If the count action is after the encap then we also
3325  * move the count action. In this case the count will also measure
3326  * the outer bytes.
3327  *
3328  * @param dev
3329  *   Pointer to Ethernet device.
3330  * @param[in] actions
3331  *   Associated actions (list terminated by the END action).
3332  * @param[out] actions_rx
3333  *   Rx flow actions.
3334  * @param[out] actions_tx
3335  *   Tx flow actions.
3336  * @param[out] pattern_tx
3337  *   The pattern items for the Tx flow.
3338  * @param[out] flow_id
3339  *   The flow ID connected to this flow.
3340  *
3341  * @return
3342  *   0 on success.
3343  */
3344 static int
3345 flow_hairpin_split(struct rte_eth_dev *dev,
3346                    const struct rte_flow_action actions[],
3347                    struct rte_flow_action actions_rx[],
3348                    struct rte_flow_action actions_tx[],
3349                    struct rte_flow_item pattern_tx[],
3350                    uint32_t *flow_id)
3351 {
3352         struct mlx5_priv *priv = dev->data->dev_private;
3353         const struct rte_flow_action_raw_encap *raw_encap;
3354         const struct rte_flow_action_raw_decap *raw_decap;
3355         struct mlx5_rte_flow_action_set_tag *set_tag;
3356         struct rte_flow_action *tag_action;
3357         struct mlx5_rte_flow_item_tag *tag_item;
3358         struct rte_flow_item *item;
3359         char *addr;
3360         int encap = 0;
3361
3362         mlx5_flow_id_get(priv->sh->flow_id_pool, flow_id);
3363         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3364                 switch (actions->type) {
3365                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3366                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3367                         rte_memcpy(actions_tx, actions,
3368                                sizeof(struct rte_flow_action));
3369                         actions_tx++;
3370                         break;
3371                 case RTE_FLOW_ACTION_TYPE_COUNT:
3372                         if (encap) {
3373                                 rte_memcpy(actions_tx, actions,
3374                                            sizeof(struct rte_flow_action));
3375                                 actions_tx++;
3376                         } else {
3377                                 rte_memcpy(actions_rx, actions,
3378                                            sizeof(struct rte_flow_action));
3379                                 actions_rx++;
3380                         }
3381                         break;
3382                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3383                         raw_encap = actions->conf;
3384                         if (raw_encap->size >
3385                             (sizeof(struct rte_flow_item_eth) +
3386                              sizeof(struct rte_flow_item_ipv4))) {
3387                                 memcpy(actions_tx, actions,
3388                                        sizeof(struct rte_flow_action));
3389                                 actions_tx++;
3390                                 encap = 1;
3391                         } else {
3392                                 rte_memcpy(actions_rx, actions,
3393                                            sizeof(struct rte_flow_action));
3394                                 actions_rx++;
3395                         }
3396                         break;
3397                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3398                         raw_decap = actions->conf;
3399                         if (raw_decap->size <
3400                             (sizeof(struct rte_flow_item_eth) +
3401                              sizeof(struct rte_flow_item_ipv4))) {
3402                                 memcpy(actions_tx, actions,
3403                                        sizeof(struct rte_flow_action));
3404                                 actions_tx++;
3405                         } else {
3406                                 rte_memcpy(actions_rx, actions,
3407                                            sizeof(struct rte_flow_action));
3408                                 actions_rx++;
3409                         }
3410                         break;
3411                 default:
3412                         rte_memcpy(actions_rx, actions,
3413                                    sizeof(struct rte_flow_action));
3414                         actions_rx++;
3415                         break;
3416                 }
3417         }
3418         /* Add set meta action and end action for the Rx flow. */
3419         tag_action = actions_rx;
3420         tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3421         actions_rx++;
3422         rte_memcpy(actions_rx, actions, sizeof(struct rte_flow_action));
3423         actions_rx++;
3424         set_tag = (void *)actions_rx;
3425         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_RX, 0, NULL);
3426         MLX5_ASSERT(set_tag->id > REG_NONE);
3427         set_tag->data = *flow_id;
3428         tag_action->conf = set_tag;
3429         /* Create Tx item list. */
3430         rte_memcpy(actions_tx, actions, sizeof(struct rte_flow_action));
3431         addr = (void *)&pattern_tx[2];
3432         item = pattern_tx;
3433         item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3434         tag_item = (void *)addr;
3435         tag_item->data = *flow_id;
3436         tag_item->id = mlx5_flow_get_reg_id(dev, MLX5_HAIRPIN_TX, 0, NULL);
3437         MLX5_ASSERT(tag_item->id > REG_NONE);
3438         item->spec = tag_item;
3439         addr += sizeof(struct mlx5_rte_flow_item_tag);
3440         tag_item = (void *)addr;
3441         tag_item->data = UINT32_MAX;
3442         tag_item->id = UINT16_MAX;
3443         item->mask = tag_item;
3444         addr += sizeof(struct mlx5_rte_flow_item_tag);
3445         item->last = NULL;
3446         item++;
3447         item->type = RTE_FLOW_ITEM_TYPE_END;
3448         return 0;
3449 }
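/*
 * Example: for actions { VXLAN_ENCAP, QUEUE, END } on a hairpin queue the
 * split produces roughly (TAG below is the internal
 * MLX5_RTE_FLOW_ACTION_TYPE_TAG / MLX5_RTE_FLOW_ITEM_TYPE_TAG):
 *
 *     Rx actions: { QUEUE, TAG (set reg := flow_id), END }
 *     Tx actions: { VXLAN_ENCAP, END }
 *     Tx pattern: { TAG (reg == flow_id), END }
 */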
3450
3451 /**
3452  * The last stage of the splitting chain; it just creates the subflow
3453  * without any modification.
3454  *
3455  * @param[in] dev
3456  *   Pointer to Ethernet device.
3457  * @param[in] flow
3458  *   Parent flow structure pointer.
3459  * @param[in, out] sub_flow
3460  *   Pointer to return the created subflow, may be NULL.
3461  * @param[in] prefix_layers
3462  *   Prefix subflow layers, may be 0.
3463  * @param[in] attr
3464  *   Flow rule attributes.
3465  * @param[in] items
3466  *   Pattern specification (list terminated by the END pattern item).
3467  * @param[in] actions
3468  *   Associated actions (list terminated by the END action).
3469  * @param[in] external
3470  *   This flow rule is created by a request external to the PMD.
3471  * @param[out] error
3472  *   Perform verbose error reporting if not NULL.
3473  * @return
3474  *   0 on success, negative value otherwise
3475  */
3476 static int
3477 flow_create_split_inner(struct rte_eth_dev *dev,
3478                         struct rte_flow *flow,
3479                         struct mlx5_flow **sub_flow,
3480                         uint64_t prefix_layers,
3481                         const struct rte_flow_attr *attr,
3482                         const struct rte_flow_item items[],
3483                         const struct rte_flow_action actions[],
3484                         bool external, struct rte_flow_error *error)
3485 {
3486         struct mlx5_flow *dev_flow;
3487
3488         dev_flow = flow_drv_prepare(dev, flow, attr, items, actions, error);
3489         if (!dev_flow)
3490                 return -rte_errno;
3491         dev_flow->flow = flow;
3492         dev_flow->external = external;
3493         /* Subflow object was created, we must include it in the list. */
3494         SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
3495                       dev_flow->handle, next);
3496         /*
3497          * If dev_flow is as one of the suffix flow, some actions in suffix
3498          * flow may need some user defined item layer flags.
3499          */
3500         if (prefix_layers)
3501                 dev_flow->handle->layers = prefix_layers;
3502         if (sub_flow)
3503                 *sub_flow = dev_flow;
3504         return flow_drv_translate(dev, dev_flow, attr, items, actions, error);
3505 }
3506
3507 /**
3508  * Split the meter flow.
3509  *
3510  * As the meter flow will be split into three subflows, the actions
3511  * other than the meter action only make sense if the meter accepts
3512  * the packet. If the packet needs to be dropped, no additional
3513  * actions should be taken.
3514  *
3515  * One kind of special action, which decapsulates the L3 tunnel
3516  * header, will be put in the prefix subflow, so as not to take
3517  * the L3 tunnel header into account.
3518  *
3519  * @param dev
3520  *   Pointer to Ethernet device.
3521  * @param[in] items
3522  *   Pattern specification (list terminated by the END pattern item).
3523  * @param[out] sfx_items
3524  *   Suffix flow match items (list terminated by the END pattern item).
3525  * @param[in] actions
3526  *   Associated actions (list terminated by the END action).
3527  * @param[out] actions_sfx
3528  *   Suffix flow actions.
3529  * @param[out] actions_pre
3530  *   Prefix flow actions.
3535  *
3536  * @return
3537  *   The flow tag ID allocated for the suffix subflow.
3538  */
3539 static int
3540 flow_meter_split_prep(struct rte_eth_dev *dev,
3541                  const struct rte_flow_item items[],
3542                  struct rte_flow_item sfx_items[],
3543                  const struct rte_flow_action actions[],
3544                  struct rte_flow_action actions_sfx[],
3545                  struct rte_flow_action actions_pre[])
3546 {
3547         struct rte_flow_action *tag_action = NULL;
3548         struct rte_flow_item *tag_item;
3549         struct mlx5_rte_flow_action_set_tag *set_tag;
3550         struct rte_flow_error error;
3551         const struct rte_flow_action_raw_encap *raw_encap;
3552         const struct rte_flow_action_raw_decap *raw_decap;
3553         struct mlx5_rte_flow_item_tag *tag_spec;
3554         struct mlx5_rte_flow_item_tag *tag_mask;
3555         uint32_t tag_id;
3556         bool copy_vlan = false;
3557
3558         /* Prepare the actions for prefix and suffix flow. */
3559         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3560                 struct rte_flow_action **action_cur = NULL;
3561
3562                 switch (actions->type) {
3563                 case RTE_FLOW_ACTION_TYPE_METER:
3564                         /* Add the extra tag action first. */
3565                         tag_action = actions_pre;
3566                         tag_action->type = MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3567                         actions_pre++;
3568                         action_cur = &actions_pre;
3569                         break;
3570                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3571                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3572                         action_cur = &actions_pre;
3573                         break;
3574                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3575                         raw_encap = actions->conf;
3576                         if (raw_encap->size < MLX5_ENCAPSULATION_DECISION_SIZE)
3577                                 action_cur = &actions_pre;
3578                         break;
3579                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3580                         raw_decap = actions->conf;
3581                         if (raw_decap->size > MLX5_ENCAPSULATION_DECISION_SIZE)
3582                                 action_cur = &actions_pre;
3583                         break;
3584                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3585                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3586                         copy_vlan = true;
3587                         break;
3588                 default:
3589                         break;
3590                 }
3591                 if (!action_cur)
3592                         action_cur = &actions_sfx;
3593                 memcpy(*action_cur, actions, sizeof(struct rte_flow_action));
3594                 (*action_cur)++;
3595         }
3596         /* Add end action to the actions. */
3597         actions_sfx->type = RTE_FLOW_ACTION_TYPE_END;
3598         actions_pre->type = RTE_FLOW_ACTION_TYPE_END;
3599         actions_pre++;
3600         /* Set the tag. */
3601         set_tag = (void *)actions_pre;
3602         set_tag->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3603         /*
3604          * Get the id from the qrss_pool to make qrss share the id with meter.
3605          */
3606         tag_id = flow_qrss_get_id(dev);
3607         set_tag->data = tag_id << MLX5_MTR_COLOR_BITS;
3608         MLX5_ASSERT(tag_action);
3609         tag_action->conf = set_tag;
3610         /* Prepare the suffix subflow items. */
3611         tag_item = sfx_items++;
3612         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3613                 int item_type = items->type;
3614
3615                 switch (item_type) {
3616                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3617                         memcpy(sfx_items, items, sizeof(*sfx_items));
3618                         sfx_items++;
3619                         break;
3620                 case RTE_FLOW_ITEM_TYPE_VLAN:
3621                         if (copy_vlan) {
3622                                 memcpy(sfx_items, items, sizeof(*sfx_items));
3623                                 /*
3624                                  * Convert to internal match item, it is used
3625                                  * for vlan push and set vid.
3626                                  */
3627                                 sfx_items->type = MLX5_RTE_FLOW_ITEM_TYPE_VLAN;
3628                                 sfx_items++;
3629                         }
3630                         break;
3631                 default:
3632                         break;
3633                 }
3634         }
3635         sfx_items->type = RTE_FLOW_ITEM_TYPE_END;
3636         sfx_items++;
3637         tag_spec = (struct mlx5_rte_flow_item_tag *)sfx_items;
3638         tag_spec->data = tag_id << MLX5_MTR_COLOR_BITS;
3639         tag_spec->id = mlx5_flow_get_reg_id(dev, MLX5_MTR_SFX, 0, &error);
3640         tag_mask = tag_spec + 1;
3641         tag_mask->data = 0xffffff00;
3642         tag_item->type = MLX5_RTE_FLOW_ITEM_TYPE_TAG;
3643         tag_item->spec = tag_spec;
3644         tag_item->last = NULL;
3645         tag_item->mask = tag_mask;
3646         return tag_id;
3647 }
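/*
 * Example: for items { PORT_ID, ETH, ... } and actions { METER, RSS, END }
 * the split produces roughly:
 *
 *     prefix actions: { TAG (set mtr reg := tag_id << color bits),
 *                       METER, END }
 *     suffix actions: { RSS, END }
 *     suffix items:   { TAG (mtr reg == tag_id << color bits),
 *                       PORT_ID, END }
 */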
3648
3649 /**
3650  * Split action list having QUEUE/RSS for metadata register copy.
3651  *
3652  * Once Q/RSS action is detected in user's action list, the flow action
3653  * should be split in order to copy metadata registers, which will happen in
3654  * RX_CP_TBL like,
3655  *   - CQE->flow_tag := reg_c[1] (MARK)
3656  *   - CQE->flow_table_metadata (reg_b) := reg_c[0] (META)
3657  * The Q/RSS action will be performed on RX_ACT_TBL after passing by RX_CP_TBL.
3658  * This is because the last action of each flow must be a terminal action
3659  * (QUEUE, RSS or DROP).
3660  *
3661  * Flow ID must be allocated to identify actions in the RX_ACT_TBL and it is
3662  * stored and kept in the mlx5_flow structure for each sub_flow.
3663  *
3664  * The Q/RSS action is replaced with,
3665  *   - SET_TAG, setting the allocated flow ID to reg_c[2].
3666  * And the following JUMP action is added at the end,
3667  *   - JUMP, to RX_CP_TBL.
3668  *
3669  * A flow to perform the remaining Q/RSS action will be created in RX_ACT_TBL by
3670  * flow_create_split_metadata() routine. The flow will look like,
3671  *   - If flow ID matches (reg_c[2]), perform Q/RSS.
3672  *
3673  * @param dev
3674  *   Pointer to Ethernet device.
3675  * @param[out] split_actions
3676  *   Pointer to store split actions to jump to CP_TBL.
3677  * @param[in] actions
3678  *   Pointer to the list of original flow actions.
3679  * @param[in] qrss
3680  *   Pointer to the Q/RSS action.
3681  * @param[in] actions_n
3682  *   Number of original actions.
3683  * @param[out] error
3684  *   Perform verbose error reporting if not NULL.
3685  *
3686  * @return
3687  *   Non-zero unique flow_id on success, otherwise 0 and
3688  *   error/rte_errno are set.
3689  */
3690 static uint32_t
3691 flow_mreg_split_qrss_prep(struct rte_eth_dev *dev,
3692                           struct rte_flow_action *split_actions,
3693                           const struct rte_flow_action *actions,
3694                           const struct rte_flow_action *qrss,
3695                           int actions_n, struct rte_flow_error *error)
3696 {
3697         struct mlx5_rte_flow_action_set_tag *set_tag;
3698         struct rte_flow_action_jump *jump;
3699         const int qrss_idx = qrss - actions;
3700         uint32_t flow_id = 0;
3701         int ret = 0;
3702
3703         /*
3704          * Given actions will be split
3705          * - Replace QUEUE/RSS action with SET_TAG to set flow ID.
3706          * - Add jump to mreg CP_TBL.
3707          * As a result, there will be one more action.
3708          */
3709         ++actions_n;
3710         memcpy(split_actions, actions, sizeof(*split_actions) * actions_n);
3711         set_tag = (void *)(split_actions + actions_n);
3712         /*
3713          * If the tag action is not set to void (meaning we are not the
3714          * meter suffix flow), add the tag action, since the meter suffix
3715          * flow already has the tag added.
3716          */
3717         if (split_actions[qrss_idx].type != RTE_FLOW_ACTION_TYPE_VOID) {
3718                 /*
3719                  * Allocate the new subflow ID. This one is unique within
3720                  * device and not shared with representors. Otherwise,
3721                  * we would have to resolve multi-thread access synch
3722                  * issue. Each flow on the shared device is appended
3723                  * with source vport identifier, so the resulting
3724                  * flows will be unique in the shared (by master and
3725                  * representors) domain even if they have coinciding
3726                  * IDs.
3727                  */
3728                 flow_id = flow_qrss_get_id(dev);
3729                 if (!flow_id)
3730                         return rte_flow_error_set(error, ENOMEM,
3731                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3732                                                   NULL, "can't allocate id "
3733                                                   "for split Q/RSS subflow");
3734                 /* Internal SET_TAG action to set flow ID. */
3735                 *set_tag = (struct mlx5_rte_flow_action_set_tag){
3736                         .data = flow_id,
3737                 };
3738                 ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0, error);
3739                 if (ret < 0)
3740                         return ret;
3741                 set_tag->id = ret;
3742                 /* Construct new actions array. */
3743                 /* Replace QUEUE/RSS action. */
3744                 split_actions[qrss_idx] = (struct rte_flow_action){
3745                         .type = MLX5_RTE_FLOW_ACTION_TYPE_TAG,
3746                         .conf = set_tag,
3747                 };
3748         }
3749         /* JUMP action to jump to mreg copy table (CP_TBL). */
3750         jump = (void *)(set_tag + 1);
3751         *jump = (struct rte_flow_action_jump){
3752                 .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
3753         };
3754         split_actions[actions_n - 2] = (struct rte_flow_action){
3755                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
3756                 .conf = jump,
3757         };
3758         split_actions[actions_n - 1] = (struct rte_flow_action){
3759                 .type = RTE_FLOW_ACTION_TYPE_END,
3760         };
3761         return flow_id;
3762 }
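/*
 * Example: for actions { MARK, RSS, END } (actions_n = 3) the split
 * actions become { MARK, TAG (set reg_c[2] := flow_id), JUMP (to
 * RX_CP_TBL), END } and the allocated flow_id is returned. For a meter
 * suffix flow the Q/RSS slot is already VOID, so only the JUMP/END tail
 * is rewritten and 0 is returned.
 */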
3763
3764 /**
3765  * Extend the given action list for Tx metadata copy.
3766  *
3767  * Copy the given action list to the ext_actions and add flow metadata register
3768  * copy action in order to copy reg_a set by WQE to reg_c[0].
3769  *
3770  * @param[out] ext_actions
3771  *   Pointer to the extended action list.
3772  * @param[in] actions
3773  *   Pointer to the list of actions.
3774  * @param[in] actions_n
3775  *   Number of actions in the list.
3776  * @param[out] error
3777  *   Perform verbose error reporting if not NULL.
3778  * @param[in] encap_idx
3779  *   The encap action index.
3780  *
3781  * @return
3782  *   0 on success, negative value otherwise
3783  */
3784 static int
3785 flow_mreg_tx_copy_prep(struct rte_eth_dev *dev,
3786                        struct rte_flow_action *ext_actions,
3787                        const struct rte_flow_action *actions,
3788                        int actions_n, struct rte_flow_error *error,
3789                        int encap_idx)
3790 {
3791         struct mlx5_flow_action_copy_mreg *cp_mreg =
3792                 (struct mlx5_flow_action_copy_mreg *)
3793                         (ext_actions + actions_n + 1);
3794         int ret;
3795
3796         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_RX, 0, error);
3797         if (ret < 0)
3798                 return ret;
3799         cp_mreg->dst = ret;
3800         ret = mlx5_flow_get_reg_id(dev, MLX5_METADATA_TX, 0, error);
3801         if (ret < 0)
3802                 return ret;
3803         cp_mreg->src = ret;
3804         if (encap_idx != 0)
3805                 memcpy(ext_actions, actions, sizeof(*ext_actions) * encap_idx);
3806         if (encap_idx == actions_n - 1) {
3807                 ext_actions[actions_n - 1] = (struct rte_flow_action){
3808                         .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3809                         .conf = cp_mreg,
3810                 };
3811                 ext_actions[actions_n] = (struct rte_flow_action){
3812                         .type = RTE_FLOW_ACTION_TYPE_END,
3813                 };
3814         } else {
3815                 ext_actions[encap_idx] = (struct rte_flow_action){
3816                         .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
3817                         .conf = cp_mreg,
3818                 };
3819                 memcpy(ext_actions + encap_idx + 1, actions + encap_idx,
3820                                 sizeof(*ext_actions) * (actions_n - encap_idx));
3821         }
3822         return 0;
3823 }
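/*
 * Example: for Tx actions { COUNT, END } (actions_n = 2, encap_idx = 1)
 * the result is { COUNT, COPY_MREG (reg_a -> reg_c[0]), END }. With a
 * leading big RAW_ENCAP (encap_idx = 0) the copy is placed before the
 * encap instead: { COPY_MREG, RAW_ENCAP, END }.
 */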
3824
3825 /**
3826  * The splitting for metadata feature.
3827  *
3828  * - Q/RSS action on NIC Rx should be split in order to pass by
3829  *   the mreg copy table (RX_CP_TBL) and then it jumps to the
3830  *   action table (RX_ACT_TBL) which has the split Q/RSS action.
3831  *
3832  * - All the actions on NIC Tx should have a mreg copy action to
3833  *   copy reg_a from WQE to reg_c[0].
3834  *
3835  * @param dev
3836  *   Pointer to Ethernet device.
3837  * @param[in] flow
3838  *   Parent flow structure pointer.
3839  * @param[in] prefix_layers
3840  *   Prefix flow layer flags.
3841  * @param[in] attr
3842  *   Flow rule attributes.
3843  * @param[in] items
3844  *   Pattern specification (list terminated by the END pattern item).
3845  * @param[in] actions
3846  *   Associated actions (list terminated by the END action).
3847  * @param[in] external
3848  *   This flow rule is created by a request external to the PMD.
3849  * @param[out] error
3850  *   Perform verbose error reporting if not NULL.
3851  * @return
3852  *   0 on success, negative value otherwise
3853  */
3854 static int
3855 flow_create_split_metadata(struct rte_eth_dev *dev,
3856                            struct rte_flow *flow,
3857                            uint64_t prefix_layers,
3858                            const struct rte_flow_attr *attr,
3859                            const struct rte_flow_item items[],
3860                            const struct rte_flow_action actions[],
3861                            bool external, struct rte_flow_error *error)
3862 {
3863         struct mlx5_priv *priv = dev->data->dev_private;
3864         struct mlx5_dev_config *config = &priv->config;
3865         const struct rte_flow_action *qrss = NULL;
3866         struct rte_flow_action *ext_actions = NULL;
3867         struct mlx5_flow *dev_flow = NULL;
3868         uint32_t qrss_id = 0;
3869         int mtr_sfx = 0;
3870         size_t act_size;
3871         int actions_n;
3872         int encap_idx;
3873         int ret;
3874
3875         /* Check whether extensive metadata feature is engaged. */
3876         if (!config->dv_flow_en ||
3877             config->dv_xmeta_en == MLX5_XMETA_MODE_LEGACY ||
3878             !mlx5_flow_ext_mreg_supported(dev))
3879                 return flow_create_split_inner(dev, flow, NULL, prefix_layers,
3880                                                attr, items, actions, external,
3881                                                error);
3882         actions_n = flow_parse_metadata_split_actions_info(actions, &qrss,
3883                                                            &encap_idx);
3884         if (qrss) {
3885                 /* Exclude hairpin flows from splitting. */
3886                 if (qrss->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
3887                         const struct rte_flow_action_queue *queue;
3888
3889                         queue = qrss->conf;
3890                         if (mlx5_rxq_get_type(dev, queue->index) ==
3891                             MLX5_RXQ_TYPE_HAIRPIN)
3892                                 qrss = NULL;
3893                 } else if (qrss->type == RTE_FLOW_ACTION_TYPE_RSS) {
3894                         const struct rte_flow_action_rss *rss;
3895
3896                         rss = qrss->conf;
3897                         if (mlx5_rxq_get_type(dev, rss->queue[0]) ==
3898                             MLX5_RXQ_TYPE_HAIRPIN)
3899                                 qrss = NULL;
3900                 }
3901         }
3902         if (qrss) {
3903                 /* Check whether the flow is in the meter suffix table. */
3904                 mtr_sfx = attr->group == (attr->transfer ?
3905                           (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
3906                           MLX5_FLOW_TABLE_LEVEL_SUFFIX);
3907                 /*
3908                  * The Q/RSS action on NIC Rx should be split in order to
3909                  * pass through the mreg copy table (RX_CP_TBL); the flow then
3910                  * jumps to the action table (RX_ACT_TBL) with the split action.
3911                  */
3912                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3913                            sizeof(struct rte_flow_action_set_tag) +
3914                            sizeof(struct rte_flow_action_jump);
3915                 ext_actions = rte_zmalloc(__func__, act_size, 0);
3916                 if (!ext_actions)
3917                         return rte_flow_error_set(error, ENOMEM,
3918                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3919                                                   NULL, "no memory to split "
3920                                                   "metadata flow");
3921                 /*
3922                  * If we are the suffix flow of a meter, the tag already exists.
3923                  * Set the tag action to void.
3924                  */
3925                 if (mtr_sfx)
3926                         ext_actions[qrss - actions].type =
3927                                                 RTE_FLOW_ACTION_TYPE_VOID;
3928                 else
3929                         ext_actions[qrss - actions].type =
3930                                                 MLX5_RTE_FLOW_ACTION_TYPE_TAG;
3931                 /*
3932                  * Create the new action list with the Q/RSS action removed
3933                  * and a set tag plus a jump to the register copy table
3934                  * (RX_CP_TBL) appended. The unique tag ID must be
3935                  * preallocated here because the set tag action needs it.
3936                  */
3937                 qrss_id = flow_mreg_split_qrss_prep(dev, ext_actions, actions,
3938                                                     qrss, actions_n, error);
3939                 if (!mtr_sfx && !qrss_id) {
3940                         ret = -rte_errno;
3941                         goto exit;
3942                 }
3943         } else if (attr->egress && !attr->transfer) {
3944                 /*
3945                  * All the actions on NIC Tx should have a metadata register
3946                  * copy action to copy reg_a from the WQE to reg_c[meta].
3947                  */
3948                 act_size = sizeof(struct rte_flow_action) * (actions_n + 1) +
3949                            sizeof(struct mlx5_flow_action_copy_mreg);
3950                 ext_actions = rte_zmalloc(__func__, act_size, 0);
3951                 if (!ext_actions)
3952                         return rte_flow_error_set(error, ENOMEM,
3953                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3954                                                   NULL, "no memory to split "
3955                                                   "metadata flow");
3956                 /* Create the action list appended with copy register. */
3957                 ret = flow_mreg_tx_copy_prep(dev, ext_actions, actions,
3958                                              actions_n, error, encap_idx);
3959                 if (ret < 0)
3960                         goto exit;
3961         }
3962         /* Add the unmodified original or prefix subflow. */
3963         ret = flow_create_split_inner(dev, flow, &dev_flow, prefix_layers, attr,
3964                                       items, ext_actions ? ext_actions :
3965                                       actions, external, error);
3966         if (ret < 0)
3967                 goto exit;
3968         MLX5_ASSERT(dev_flow);
3969         if (qrss) {
3970                 const struct rte_flow_attr q_attr = {
3971                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
3972                         .ingress = 1,
3973                 };
3974                 /* Internal PMD action to set register. */
3975                 struct mlx5_rte_flow_item_tag q_tag_spec = {
3976                         .data = qrss_id,
3977                         .id = 0,
3978                 };
3979                 struct rte_flow_item q_items[] = {
3980                         {
3981                                 .type = MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3982                                 .spec = &q_tag_spec,
3983                                 .last = NULL,
3984                                 .mask = NULL,
3985                         },
3986                         {
3987                                 .type = RTE_FLOW_ITEM_TYPE_END,
3988                         },
3989                 };
3990                 struct rte_flow_action q_actions[] = {
3991                         {
3992                                 .type = qrss->type,
3993                                 .conf = qrss->conf,
3994                         },
3995                         {
3996                                 .type = RTE_FLOW_ACTION_TYPE_END,
3997                         },
3998                 };
3999                 uint64_t layers = flow_get_prefix_layer_flags(dev_flow);
4000
4001                 /*
4002                  * Configure the tag item only if there is no meter subflow.
4003                  * Since the tag is already set in the meter suffix subflow
4004                  * we can just use the meter suffix items as they are.
4005                  */
4006                 if (qrss_id) {
4007                         /* Not meter subflow. */
4008                         MLX5_ASSERT(!mtr_sfx);
4009                         /*
4010                          * Store the unique id in the prefix flow: it is
4011                          * destroyed after the suffix flow, and the id is
4012                          * freed once no actual flows use it, at which point
4013                          * identifier reallocation becomes possible (for
4014                          * example, for other flows in other threads).
4015                          */
4016                         dev_flow->handle->split_flow_id = qrss_id;
4017                         ret = mlx5_flow_get_reg_id(dev, MLX5_COPY_MARK, 0,
4018                                                    error);
4019                         if (ret < 0)
4020                                 goto exit;
4021                         q_tag_spec.id = ret;
4022                 }
4023                 dev_flow = NULL;
4024                 /* Add suffix subflow to execute Q/RSS. */
4025                 ret = flow_create_split_inner(dev, flow, &dev_flow, layers,
4026                                               &q_attr, mtr_sfx ? items :
4027                                               q_items, q_actions,
4028                                               external, error);
4029                 if (ret < 0)
4030                         goto exit;
4031                 /* Clear qrss_id so it is freed at exit only on failure. */
4032                 qrss_id = 0;
4033                 MLX5_ASSERT(dev_flow);
4034         }
4035
4036 exit:
4037         /*
4038          * We do not destroy the partially created sub_flows in case of error.
4039          * They are included in the parent flow list and will be destroyed
4040          * by flow_drv_destroy.
4041          */
4042         flow_qrss_free_id(dev, qrss_id);
4043         rte_free(ext_actions);
4044         return ret;
4045 }
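/*
 * A hypothetical sketch (not from the source) of what the metadata split
 * above produces for a simple rule, assuming extensive metadata is enabled:
 *
 *   original: pattern ETH / END, actions MARK / QUEUE(3) / END
 *
 *   prefix:   same pattern,
 *             actions MARK / SET_TAG(id=X) / JUMP(RX_CP_TBL) / END
 *
 *   suffix:   group RX_ACT_TBL, pattern TAG(id=X) / END,
 *             actions QUEUE(3) / END
 *
 * The preallocated unique tag id X is what lets the suffix subflow match
 * exactly the packets steered by its own prefix subflow.
 */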
4046
4047 /**
4048  * The splitting for the meter feature.
4049  *
4050  * - The meter flow will be split into two flows, a prefix and a
4051  *   suffix flow. Packets proceed only if they pass the prefix
4052  *   meter action.
4053  *
4054  * - Reg_C_5 is used to match packets between the prefix and
4055  *   suffix flows.
4056  *
4057  * @param dev
4058  *   Pointer to Ethernet device.
4059  * @param[in] flow
4060  *   Parent flow structure pointer.
4061  * @param[in] attr
4062  *   Flow rule attributes.
4063  * @param[in] items
4064  *   Pattern specification (list terminated by the END pattern item).
4065  * @param[in] actions
4066  *   Associated actions (list terminated by the END action).
4067  * @param[in] external
4068  *   This flow rule is created by a request external to the PMD.
4069  * @param[out] error
4070  *   Perform verbose error reporting if not NULL.
4071  * @return
4072  *   0 on success, negative value otherwise
4073  */
4074 static int
4075 flow_create_split_meter(struct rte_eth_dev *dev,
4076                         struct rte_flow *flow,
4077                         const struct rte_flow_attr *attr,
4078                         const struct rte_flow_item items[],
4079                         const struct rte_flow_action actions[],
4080                         bool external, struct rte_flow_error *error)
4081 {
4082         struct mlx5_priv *priv = dev->data->dev_private;
4083         struct rte_flow_action *sfx_actions = NULL;
4084         struct rte_flow_action *pre_actions = NULL;
4085         struct rte_flow_item *sfx_items = NULL;
4086         struct mlx5_flow *dev_flow = NULL;
4087         struct rte_flow_attr sfx_attr = *attr;
4088         uint32_t mtr = 0;
4089         uint32_t mtr_tag_id = 0;
4090         size_t act_size;
4091         size_t item_size;
4092         int actions_n = 0;
4093         int ret;
4094
4095         if (priv->mtr_en)
4096                 actions_n = flow_check_meter_action(actions, &mtr);
4097         if (mtr) {
4098                 /* The five prefix actions: meter, decap, encap, tag, end. */
4099                 act_size = sizeof(struct rte_flow_action) * (actions_n + 5) +
4100                            sizeof(struct mlx5_rte_flow_action_set_tag);
4101                 /* tag, vlan, port id, end. */
4102 #define METER_SUFFIX_ITEM 4
4103                 item_size = sizeof(struct rte_flow_item) * METER_SUFFIX_ITEM +
4104                             sizeof(struct mlx5_rte_flow_item_tag) * 2;
4105                 sfx_actions = rte_zmalloc(__func__, (act_size + item_size), 0);
4106                 if (!sfx_actions)
4107                         return rte_flow_error_set(error, ENOMEM,
4108                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4109                                                   NULL, "no memory to split "
4110                                                   "meter flow");
4111                 sfx_items = (struct rte_flow_item *)((char *)sfx_actions +
4112                              act_size);
4113                 pre_actions = sfx_actions + actions_n;
4114                 mtr_tag_id = flow_meter_split_prep(dev, items, sfx_items,
4115                                                    actions, sfx_actions,
4116                                                    pre_actions);
4117                 if (!mtr_tag_id) {
4118                         ret = -rte_errno;
4119                         goto exit;
4120                 }
4121                 /* Add the prefix subflow. */
4122                 ret = flow_create_split_inner(dev, flow, &dev_flow, 0, attr,
4123                                               items, pre_actions, external,
4124                                               error);
4125                 if (ret) {
4126                         ret = -rte_errno;
4127                         goto exit;
4128                 }
4129                 dev_flow->handle->split_flow_id = mtr_tag_id;
4130                 /* Set the suffix group attribute. */
4131                 sfx_attr.group = sfx_attr.transfer ?
4132                                 (MLX5_FLOW_TABLE_LEVEL_SUFFIX - 1) :
4133                                  MLX5_FLOW_TABLE_LEVEL_SUFFIX;
4134         }
4135         /* Add the suffix subflow (or the unmodified original flow). */
4136         ret = flow_create_split_metadata(dev, flow, dev_flow ?
4137                                          flow_get_prefix_layer_flags(dev_flow) :
4138                                          0, &sfx_attr,
4139                                          sfx_items ? sfx_items : items,
4140                                          sfx_actions ? sfx_actions : actions,
4141                                          external, error);
4142 exit:
4143         if (sfx_actions)
4144                 rte_free(sfx_actions);
4145         return ret;
4146 }
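/*
 * A hypothetical sketch (not from the source) of the meter split above:
 *
 *   original: pattern ETH / IPV4 / END, actions METER / QUEUE(1) / END
 *
 *   prefix:   original pattern,
 *             actions METER / SET_TAG(flow id) / ... / END
 *
 *   suffix:   group MLX5_FLOW_TABLE_LEVEL_SUFFIX,
 *             pattern TAG(flow id) / ... / END, actions QUEUE(1) / END
 *
 * The suffix part is then handed to flow_create_split_metadata(), which
 * may split it further.
 */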
4147
4148 /**
4149  * Split the flow into a set of subflows. The splitters might be linked
4150  * in the chain, like this:
4151  * flow_create_split_outer() calls:
4152  *   flow_create_split_meter() calls:
4153  *     flow_create_split_metadata(meter_subflow_0) calls:
4154  *       flow_create_split_inner(metadata_subflow_0)
4155  *       flow_create_split_inner(metadata_subflow_1)
4156  *       flow_create_split_inner(metadata_subflow_2)
4157  *     flow_create_split_metadata(meter_subflow_1) calls:
4158  *       flow_create_split_inner(metadata_subflow_0)
4159  *       flow_create_split_inner(metadata_subflow_1)
4160  *       flow_create_split_inner(metadata_subflow_2)
4161  *
4162  * This provides a flexible way to add new levels of flow splitting.
4163  * All successfully created subflows are included in the
4164  * parent flow dev_flow list.
4165  *
4166  * @param dev
4167  *   Pointer to Ethernet device.
4168  * @param[in] flow
4169  *   Parent flow structure pointer.
4170  * @param[in] attr
4171  *   Flow rule attributes.
4172  * @param[in] items
4173  *   Pattern specification (list terminated by the END pattern item).
4174  * @param[in] actions
4175  *   Associated actions (list terminated by the END action).
4176  * @param[in] external
4177  *   This flow rule is created by a request external to the PMD.
4178  * @param[out] error
4179  *   Perform verbose error reporting if not NULL.
4180  * @return
4181  *   0 on success, negative value otherwise
4182  */
4183 static int
4184 flow_create_split_outer(struct rte_eth_dev *dev,
4185                         struct rte_flow *flow,
4186                         const struct rte_flow_attr *attr,
4187                         const struct rte_flow_item items[],
4188                         const struct rte_flow_action actions[],
4189                         bool external, struct rte_flow_error *error)
4190 {
4191         int ret;
4192
4193         ret = flow_create_split_meter(dev, flow, attr, items,
4194                                          actions, external, error);
4195         MLX5_ASSERT(ret <= 0);
4196         return ret;
4197 }
4198
4199 /**
4200  * Create a flow and add it to @p list.
4201  *
4202  * @param dev
4203  *   Pointer to Ethernet device.
4204  * @param list
4205  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4206  *   no list insertion occurs; the flow is just created and
4207  *   it is the caller's responsibility to track the
4208  *   created flow.
4209  * @param[in] attr
4210  *   Flow rule attributes.
4211  * @param[in] items
4212  *   Pattern specification (list terminated by the END pattern item).
4213  * @param[in] actions
4214  *   Associated actions (list terminated by the END action).
4215  * @param[in] external
4216  *   This flow rule is created by a request external to the PMD.
4217  * @param[out] error
4218  *   Perform verbose error reporting if not NULL.
4219  *
4220  * @return
4221  *   A flow on success, NULL otherwise and rte_errno is set.
4222  */
4223 static struct rte_flow *
4224 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
4225                  const struct rte_flow_attr *attr,
4226                  const struct rte_flow_item items[],
4227                  const struct rte_flow_action actions[],
4228                  bool external, struct rte_flow_error *error)
4229 {
4230         struct mlx5_priv *priv = dev->data->dev_private;
4231         struct rte_flow *flow = NULL;
4232         struct mlx5_flow *dev_flow;
4233         const struct rte_flow_action_rss *rss;
4234         union {
4235                 struct rte_flow_expand_rss buf;
4236                 uint8_t buffer[2048];
4237         } expand_buffer;
4238         union {
4239                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4240                 uint8_t buffer[2048];
4241         } actions_rx;
4242         union {
4243                 struct rte_flow_action actions[MLX5_MAX_SPLIT_ACTIONS];
4244                 uint8_t buffer[2048];
4245         } actions_hairpin_tx;
4246         union {
4247                 struct rte_flow_item items[MLX5_MAX_SPLIT_ITEMS];
4248                 uint8_t buffer[2048];
4249         } items_tx;
4250         struct rte_flow_expand_rss *buf = &expand_buffer.buf;
4251         struct mlx5_flow_rss_desc *rss_desc = &((struct mlx5_flow_rss_desc *)
4252                                               priv->rss_desc)[!!priv->flow_idx];
4253         const struct rte_flow_action *p_actions_rx = actions;
4254         uint32_t i;
4255         int hairpin_flow = 0;
4256         uint32_t hairpin_id = 0;
4257         struct rte_flow_attr attr_tx = { .priority = 0 };
4258         int ret = flow_drv_validate(dev, attr, items, p_actions_rx, external,
4259                                     error);
4260
4261         if (ret < 0)
4262                 return NULL;
4263         hairpin_flow = flow_check_hairpin_split(dev, attr, actions);
4264         if (hairpin_flow > 0) {
4265                 if (hairpin_flow > MLX5_MAX_SPLIT_ACTIONS) {
4266                         rte_errno = EINVAL;
4267                         return NULL;
4268                 }
4269                 flow_hairpin_split(dev, actions, actions_rx.actions,
4270                                    actions_hairpin_tx.actions, items_tx.items,
4271                                    &hairpin_id);
4272                 p_actions_rx = actions_rx.actions;
4273         }
4274         flow = rte_calloc(__func__, 1, sizeof(struct rte_flow), 0);
4275         if (!flow) {
4276                 rte_errno = ENOMEM;
4277                 goto error_before_flow;
4278         }
4279         flow->drv_type = flow_get_drv_type(dev, attr);
4280         if (hairpin_id != 0)
4281                 flow->hairpin_flow_id = hairpin_id;
4282         MLX5_ASSERT(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
4283                     flow->drv_type < MLX5_FLOW_TYPE_MAX);
4284         memset(rss_desc, 0, sizeof(*rss_desc));
4285         rss = flow_get_rss_action(p_actions_rx);
4286         if (rss) {
4287                 /*
4288                  * The following information is required by
4289                  * mlx5_flow_hashfields_adjust() in advance.
4290                  */
4291                 rss_desc->level = rss->level;
4292                 /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
4293                 rss_desc->types = !rss->types ? ETH_RSS_IP : rss->types;
4294         }
4295         flow->dev_handles = 0;
4296         if (rss && rss->types) {
4297                 unsigned int graph_root;
4298
4299                 graph_root = find_graph_root(items, rss->level);
4300                 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
4301                                           items, rss->types,
4302                                           mlx5_support_expansion,
4303                                           graph_root);
4304                 MLX5_ASSERT(ret > 0 &&
4305                        (unsigned int)ret < sizeof(expand_buffer.buffer));
4306         } else {
4307                 buf->entries = 1;
4308                 buf->entry[0].pattern = (void *)(uintptr_t)items;
4309         }
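        /*
         * Expansion example (hypothetical) for the rte_flow_expand_rss()
         * call above: a pattern ETH / IPV4 / END with rss->types covering
         * both UDP and TCP typically expands to:
         *   ETH / IPV4 / END
         *   ETH / IPV4 / UDP / END
         *   ETH / IPV4 / TCP / END
         * so that every requested hash type has a subflow it can match.
         */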
4310         /*
4311          * Record the start index when there is a nested call. All sub-flows
4312          * need to be translated before the next call is made.
4313          * There is no need to use a ping-pong buffer to save memory here.
4314          */
4315         if (priv->flow_idx) {
4316                 MLX5_ASSERT(!priv->flow_nested_idx);
4317                 priv->flow_nested_idx = priv->flow_idx;
4318         }
4319         for (i = 0; i < buf->entries; ++i) {
4320                 /*
4321                  * The splitter may create multiple dev_flows,
4322                  * depending on configuration. In the simplest
4323                  * case it just creates unmodified original flow.
4324                  */
4325                 ret = flow_create_split_outer(dev, flow, attr,
4326                                               buf->entry[i].pattern,
4327                                               p_actions_rx, external,
4328                                               error);
4329                 if (ret < 0)
4330                         goto error;
4331         }
4332         /* Create the tx flow. */
4333         if (hairpin_flow) {
4334                 attr_tx.group = MLX5_HAIRPIN_TX_TABLE;
4335                 attr_tx.ingress = 0;
4336                 attr_tx.egress = 1;
4337                 dev_flow = flow_drv_prepare(dev, flow, &attr_tx, items_tx.items,
4338                                             actions_hairpin_tx.actions, error);
4339                 if (!dev_flow)
4340                         goto error;
4341                 dev_flow->flow = flow;
4342                 dev_flow->external = 0;
4343                 SILIST_INSERT(&flow->dev_handles, dev_flow->handle_idx,
4344                               dev_flow->handle, next);
4345                 ret = flow_drv_translate(dev, dev_flow, &attr_tx,
4346                                          items_tx.items,
4347                                          actions_hairpin_tx.actions, error);
4348                 if (ret < 0)
4349                         goto error;
4350         }
4351         /*
4352          * Update the metadata register copy table. If the extensive
4353          * metadata feature is enabled and registers are supported
4354          * we might create an extra rte_flow for each unique
4355          * MARK/FLAG action ID.
4356          *
4357          * The table is updated for ingress flows only, because
4358          * egress flows belong to a different device and the
4359          * copy table should be updated in the peer NIC Rx domain.
4360          */
4361         if (attr->ingress &&
4362             (external || attr->group != MLX5_FLOW_MREG_CP_TABLE_GROUP)) {
4363                 ret = flow_mreg_update_copy_table(dev, flow, actions, error);
4364                 if (ret)
4365                         goto error;
4366         }
4367         /*
4368          * If the flow is external (from the application) OR the device is
4369          * started, then the flow will be applied immediately.
4370          */
4371         if (external || dev->data->dev_started) {
4372                 ret = flow_drv_apply(dev, flow, error);
4373                 if (ret < 0)
4374                         goto error;
4375         }
4376         if (list)
4377                 TAILQ_INSERT_TAIL(list, flow, next);
4378         flow_rxq_flags_set(dev, flow);
4379         /* Nested flow creation index recovery. */
4380         priv->flow_idx = priv->flow_nested_idx;
4381         if (priv->flow_nested_idx)
4382                 priv->flow_nested_idx = 0;
4383         return flow;
4384 error:
4385         MLX5_ASSERT(flow);
4386         ret = rte_errno; /* Save rte_errno before cleanup. */
4387         flow_mreg_del_copy_action(dev, flow);
4388         flow_drv_destroy(dev, flow);
4389         rte_free(flow);
4390         rte_errno = ret; /* Restore rte_errno. */
4391 error_before_flow:
4392         ret = rte_errno;
4393         if (hairpin_id)
4394                 mlx5_flow_id_release(priv->sh->flow_id_pool,
4395                                      hairpin_id);
4396         rte_errno = ret;
4397         priv->flow_idx = priv->flow_nested_idx;
4398         if (priv->flow_nested_idx)
4399                 priv->flow_nested_idx = 0;
4400         return NULL;
4401 }
4402
4403 /**
4404  * Create a dedicated flow rule on e-switch table 0 (root table), to direct all
4405  * incoming packets to table 1.
4406  *
4407  * Other flow rules, requested for group n, will be created in
4408  * e-switch table n+1.
4409  * A jump action to e-switch group n will be created as a jump to group n+1.
4410  *
4411  * Used when working in switchdev mode, to utilise advantages of table 1
4412  * and above.
4413  *
4414  * @param dev
4415  *   Pointer to Ethernet device.
4416  *
4417  * @return
4418  *   Pointer to flow on success, NULL otherwise and rte_errno is set.
4419  */
4420 struct rte_flow *
4421 mlx5_flow_create_esw_table_zero_flow(struct rte_eth_dev *dev)
4422 {
4423         const struct rte_flow_attr attr = {
4424                 .group = 0,
4425                 .priority = 0,
4426                 .ingress = 1,
4427                 .egress = 0,
4428                 .transfer = 1,
4429         };
4430         const struct rte_flow_item pattern = {
4431                 .type = RTE_FLOW_ITEM_TYPE_END,
4432         };
4433         struct rte_flow_action_jump jump = {
4434                 .group = 1,
4435         };
4436         const struct rte_flow_action actions[] = {
4437                 {
4438                         .type = RTE_FLOW_ACTION_TYPE_JUMP,
4439                         .conf = &jump,
4440                 },
4441                 {
4442                         .type = RTE_FLOW_ACTION_TYPE_END,
4443                 },
4444         };
4445         struct mlx5_priv *priv = dev->data->dev_private;
4446         struct rte_flow_error error;
4447
4448         return flow_list_create(dev, &priv->ctrl_flows, &attr, &pattern,
4449                                 actions, false, &error);
4450 }
4451
4452 /**
4453  * Create a flow.
4454  *
4455  * @see rte_flow_create()
4456  * @see rte_flow_ops
4457  */
4458 struct rte_flow *
4459 mlx5_flow_create(struct rte_eth_dev *dev,
4460                  const struct rte_flow_attr *attr,
4461                  const struct rte_flow_item items[],
4462                  const struct rte_flow_action actions[],
4463                  struct rte_flow_error *error)
4464 {
4465         struct mlx5_priv *priv = dev->data->dev_private;
4466
4467         /*
4468          * If the device is not started yet, the application is not allowed
4469          * to create a flow. PMD default flows and traffic control flows
4470          * are not affected.
4471          */
4472         if (unlikely(!dev->data->dev_started)) {
4473                 rte_errno = ENODEV;
4474                 DRV_LOG(DEBUG, "port %u is not started when "
4475                         "inserting a flow", dev->data->port_id);
4476                 return NULL;
4477         }
4478         return flow_list_create(dev, &priv->flows,
4479                                 attr, items, actions, true, error);
4480 }
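/*
 * Usage sketch (hypothetical, application side): this entry point is
 * reached through the generic rte_flow API. Assuming port_id names a
 * started port and Rx queue 0 exists, a minimal caller could look like:
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error error;
 *     struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                          actions, &error);
 *
 * On failure f is NULL and rte_errno/error describe the reason.
 */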
4481
4482 /**
4483  * Destroy a flow in a list.
4484  *
4485  * @param dev
4486  *   Pointer to Ethernet device.
4487  * @param list
4488  *   Pointer to a TAILQ flow list. If this parameter is NULL,
4489  *   the flow is not removed from any list.
4490  * @param[in] flow
4491  *   Flow to destroy.
4492  */
4493 static void
4494 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
4495                   struct rte_flow *flow)
4496 {
4497         struct mlx5_priv *priv = dev->data->dev_private;
4498         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
4499
4500         /*
4501          * Update RX queue flags only if port is started, otherwise it is
4502          * already clean.
4503          */
4504         if (dev->data->dev_started)
4505                 flow_rxq_flags_trim(dev, flow);
4506         if (flow->hairpin_flow_id)
4507                 mlx5_flow_id_release(priv->sh->flow_id_pool,
4508                                      flow->hairpin_flow_id);
4509         flow_drv_destroy(dev, flow);
4510         if (list)
4511                 TAILQ_REMOVE(list, flow, next);
4512         flow_mreg_del_copy_action(dev, flow);
4513         if (flow->fdir) {
4514                 LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
4515                         if (priv_fdir_flow->flow == flow)
4516                                 break;
4517                 }
4518                 if (priv_fdir_flow) {
4519                         LIST_REMOVE(priv_fdir_flow, next);
4520                         rte_free(priv_fdir_flow->fdir);
4521                         rte_free(priv_fdir_flow);
4522                 }
4523         }
4524         rte_free(flow);
4525 }
4526
4527 /**
4528  * Destroy all flows.
4529  *
4530  * @param dev
4531  *   Pointer to Ethernet device.
4532  * @param list
4533  *   Pointer to a TAILQ flow list.
4534  * @param active
4535  *   If flushing is called actively (e.g. before stopping the port).
4536  */
4537 void
4538 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list,
4539                      bool active)
4540 {
4541         uint32_t num_flushed = 0;
4542
4543         while (!TAILQ_EMPTY(list)) {
4544                 struct rte_flow *flow;
4545
4546                 flow = TAILQ_FIRST(list);
4547                 flow_list_destroy(dev, list, flow);
4548                 num_flushed++;
4549         }
4550         if (active) {
4551                 DRV_LOG(INFO, "port %u: %u flows flushed before stopping",
4552                         dev->data->port_id, num_flushed);
4553         }
4554 }
4555
4556 /**
4557  * Remove all flows.
4558  *
4559  * @param dev
4560  *   Pointer to Ethernet device.
4561  * @param list
4562  *   Pointer to a TAILQ flow list.
4563  */
4564 void
4565 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
4566 {
4567         struct rte_flow *flow;
4568
4569         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next) {
4570                 flow_drv_remove(dev, flow);
4571                 flow_mreg_stop_copy_action(dev, flow);
4572         }
4573         flow_mreg_del_default_copy_action(dev);
4574         flow_rxq_flags_clear(dev);
4575 }
4576
4577 /**
4578  * Add all flows.
4579  *
4580  * @param dev
4581  *   Pointer to Ethernet device.
4582  * @param list
4583  *   Pointer to a TAILQ flow list.
4584  *
4585  * @return
4586  *   0 on success, a negative errno value otherwise and rte_errno is set.
4587  */
4588 int
4589 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
4590 {
4591         struct rte_flow *flow;
4592         struct rte_flow_error error;
4593         int ret = 0;
4594
4595         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4596         ret = flow_mreg_add_default_copy_action(dev, &error);
4597         if (ret < 0)
4598                 return -rte_errno;
4599         /* Apply Flows created by application. */
4600         TAILQ_FOREACH(flow, list, next) {
4601                 ret = flow_mreg_start_copy_action(dev, flow);
4602                 if (ret < 0)
4603                         goto error;
4604                 ret = flow_drv_apply(dev, flow, &error);
4605                 if (ret < 0)
4606                         goto error;
4607                 flow_rxq_flags_set(dev, flow);
4608         }
4609         return 0;
4610 error:
4611         ret = rte_errno; /* Save rte_errno before cleanup. */
4612         mlx5_flow_stop(dev, list);
4613         rte_errno = ret; /* Restore rte_errno. */
4614         return -rte_errno;
4615 }
4616
4617 /**
4618  * Stop all default actions for flows.
4619  *
4620  * @param dev
4621  *   Pointer to Ethernet device.
4622  */
4623 void
4624 mlx5_flow_stop_default(struct rte_eth_dev *dev)
4625 {
4626         flow_mreg_del_default_copy_action(dev);
4627 }
4628
4629 /**
4630  * Start all default actions for flows.
4631  *
4632  * @param dev
4633  *   Pointer to Ethernet device.
4634  * @return
4635  *   0 on success, a negative errno value otherwise and rte_errno is set.
4636  */
4637 int
4638 mlx5_flow_start_default(struct rte_eth_dev *dev)
4639 {
4640         struct rte_flow_error error;
4641
4642         /* Make sure default copy action (reg_c[0] -> reg_b) is created. */
4643         return flow_mreg_add_default_copy_action(dev, &error);
4644 }
4645
4646 /**
4647  * Allocate intermediate resources for flow creation.
4648  *
4649  * @param dev
4650  *   Pointer to Ethernet device.
4651  */
4652 void
4653 mlx5_flow_alloc_intermediate(struct rte_eth_dev *dev)
4654 {
4655         struct mlx5_priv *priv = dev->data->dev_private;
4656
4657         if (!priv->inter_flows) {
4658                 priv->inter_flows = rte_calloc(__func__, 1,
4659                                     MLX5_NUM_MAX_DEV_FLOWS *
4660                                     sizeof(struct mlx5_flow) +
4661                                     (sizeof(struct mlx5_flow_rss_desc) +
4662                                     sizeof(uint16_t) * UINT16_MAX) * 2, 0);
4663                 if (!priv->inter_flows) {
4664                         DRV_LOG(ERR, "can't allocate intermediate memory.");
4665                         return;
4666                 }
4667         }
4668         priv->rss_desc = &((struct mlx5_flow *)priv->inter_flows)
4669                          [MLX5_NUM_MAX_DEV_FLOWS];
4670         /* Reset the index. */
4671         priv->flow_idx = 0;
4672         priv->flow_nested_idx = 0;
4673 }
4674
4675 /**
4676  * Free intermediate resources for flows.
4677  *
4678  * @param dev
4679  *   Pointer to Ethernet device.
4680  */
4681 void
4682 mlx5_flow_free_intermediate(struct rte_eth_dev *dev)
4683 {
4684         struct mlx5_priv *priv = dev->data->dev_private;
4685
4686         rte_free(priv->inter_flows);
4687         priv->inter_flows = NULL;
4688 }
4689
4690 /**
4691  * Verify the flow list is empty.
4692  *
4693  * @param dev
4694  *   Pointer to Ethernet device.
4695  *
4696  * @return the number of flows not released.
4697  */
4698 int
4699 mlx5_flow_verify(struct rte_eth_dev *dev)
4700 {
4701         struct mlx5_priv *priv = dev->data->dev_private;
4702         struct rte_flow *flow;
4703         int ret = 0;
4704
4705         TAILQ_FOREACH(flow, &priv->flows, next) {
4706                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
4707                         dev->data->port_id, (void *)flow);
4708                 ++ret;
4709         }
4710         return ret;
4711 }
4712
4713 /**
4714  * Enable default hairpin egress flow.
4715  *
4716  * @param dev
4717  *   Pointer to Ethernet device.
4718  * @param queue
4719  *   The queue index.
4720  *
4721  * @return
4722  *   0 on success, a negative errno value otherwise and rte_errno is set.
4723  */
4724 int
4725 mlx5_ctrl_flow_source_queue(struct rte_eth_dev *dev,
4726                             uint32_t queue)
4727 {
4728         struct mlx5_priv *priv = dev->data->dev_private;
4729         const struct rte_flow_attr attr = {
4730                 .egress = 1,
4731                 .priority = 0,
4732         };
4733         struct mlx5_rte_flow_item_tx_queue queue_spec = {
4734                 .queue = queue,
4735         };
4736         struct mlx5_rte_flow_item_tx_queue queue_mask = {
4737                 .queue = UINT32_MAX,
4738         };
4739         struct rte_flow_item items[] = {
4740                 {
4741                         .type = MLX5_RTE_FLOW_ITEM_TYPE_TX_QUEUE,
4742                         .spec = &queue_spec,
4743                         .last = NULL,
4744                         .mask = &queue_mask,
4745                 },
4746                 {
4747                         .type = RTE_FLOW_ITEM_TYPE_END,
4748                 },
4749         };
4750         struct rte_flow_action_jump jump = {
4751                 .group = MLX5_HAIRPIN_TX_TABLE,
4752         };
4753         struct rte_flow_action actions[2];
4754         struct rte_flow *flow;
4755         struct rte_flow_error error;
4756
4757         actions[0].type = RTE_FLOW_ACTION_TYPE_JUMP;
4758         actions[0].conf = &jump;
4759         actions[1].type = RTE_FLOW_ACTION_TYPE_END;
4760         flow = flow_list_create(dev, &priv->ctrl_flows,
4761                                 &attr, items, actions, false, &error);
4762         if (!flow) {
4763                 DRV_LOG(DEBUG,
4764                         "Failed to create ctrl flow: rte_errno(%d),"
4765                         " type(%d), message(%s)",
4766                         rte_errno, error.type,
4767                         error.message ? error.message : " (no stated reason)");
4768                 return -rte_errno;
4769         }
4770         return 0;
4771 }
4772
4773 /**
4774  * Enable a control flow configured from the control plane.
4775  *
4776  * @param dev
4777  *   Pointer to Ethernet device.
4778  * @param eth_spec
4779  *   An Ethernet flow spec to apply.
4780  * @param eth_mask
4781  *   An Ethernet flow mask to apply.
4782  * @param vlan_spec
4783  *   A VLAN flow spec to apply.
4784  * @param vlan_mask
4785  *   A VLAN flow mask to apply.
4786  *
4787  * @return
4788  *   0 on success, a negative errno value otherwise and rte_errno is set.
4789  */
4790 int
4791 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
4792                     struct rte_flow_item_eth *eth_spec,
4793                     struct rte_flow_item_eth *eth_mask,
4794                     struct rte_flow_item_vlan *vlan_spec,
4795                     struct rte_flow_item_vlan *vlan_mask)
4796 {
4797         struct mlx5_priv *priv = dev->data->dev_private;
4798         const struct rte_flow_attr attr = {
4799                 .ingress = 1,
4800                 .priority = MLX5_FLOW_PRIO_RSVD,
4801         };
4802         struct rte_flow_item items[] = {
4803                 {
4804                         .type = RTE_FLOW_ITEM_TYPE_ETH,
4805                         .spec = eth_spec,
4806                         .last = NULL,
4807                         .mask = eth_mask,
4808                 },
4809                 {
4810                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
4811                                               RTE_FLOW_ITEM_TYPE_END,
4812                         .spec = vlan_spec,
4813                         .last = NULL,
4814                         .mask = vlan_mask,
4815                 },
4816                 {
4817                         .type = RTE_FLOW_ITEM_TYPE_END,
4818                 },
4819         };
4820         uint16_t queue[priv->reta_idx_n];
4821         struct rte_flow_action_rss action_rss = {
4822                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
4823                 .level = 0,
4824                 .types = priv->rss_conf.rss_hf,
4825                 .key_len = priv->rss_conf.rss_key_len,
4826                 .queue_num = priv->reta_idx_n,
4827                 .key = priv->rss_conf.rss_key,
4828                 .queue = queue,
4829         };
4830         struct rte_flow_action actions[] = {
4831                 {
4832                         .type = RTE_FLOW_ACTION_TYPE_RSS,
4833                         .conf = &action_rss,
4834                 },
4835                 {
4836                         .type = RTE_FLOW_ACTION_TYPE_END,
4837                 },
4838         };
4839         struct rte_flow *flow;
4840         struct rte_flow_error error;
4841         unsigned int i;
4842
4843         if (!priv->reta_idx_n || !priv->rxqs_n) {
4844                 return 0;
4845         }
4846         for (i = 0; i != priv->reta_idx_n; ++i)
4847                 queue[i] = (*priv->reta_idx)[i];
4848         flow = flow_list_create(dev, &priv->ctrl_flows,
4849                                 &attr, items, actions, false, &error);
4850         if (!flow)
4851                 return -rte_errno;
4852         return 0;
4853 }
4854
4855 /**
4856  * Enable a control flow configured from the control plane.
4857  *
4858  * @param dev
4859  *   Pointer to Ethernet device.
4860  * @param eth_spec
4861  *   An Ethernet flow spec to apply.
4862  * @param eth_mask
4863  *   An Ethernet flow mask to apply.
4864  *
4865  * @return
4866  *   0 on success, a negative errno value otherwise and rte_errno is set.
4867  */
4868 int
4869 mlx5_ctrl_flow(struct rte_eth_dev *dev,
4870                struct rte_flow_item_eth *eth_spec,
4871                struct rte_flow_item_eth *eth_mask)
4872 {
4873         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
4874 }
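/*
 * Usage sketch (hypothetical): the traffic-enable path can install a
 * broadcast control flow through the helper above, for example:
 *
 *     struct rte_flow_item_eth bcast = {
 *             .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *     };
 *
 *     (void)mlx5_ctrl_flow(dev, &bcast, &bcast);
 *
 * The same structure serves as both spec and mask, so only the all-ones
 * destination MAC is matched.
 */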
4875
4876 /**
4877  * Destroy a flow.
4878  *
4879  * @see rte_flow_destroy()
4880  * @see rte_flow_ops
4881  */
4882 int
4883 mlx5_flow_destroy(struct rte_eth_dev *dev,
4884                   struct rte_flow *flow,
4885                   struct rte_flow_error *error __rte_unused)
4886 {
4887         struct mlx5_priv *priv = dev->data->dev_private;
4888
4889         flow_list_destroy(dev, &priv->flows, flow);
4890         return 0;
4891 }
4892
4893 /**
4894  * Destroy all flows.
4895  *
4896  * @see rte_flow_flush()
4897  * @see rte_flow_ops
4898  */
4899 int
4900 mlx5_flow_flush(struct rte_eth_dev *dev,
4901                 struct rte_flow_error *error __rte_unused)
4902 {
4903         struct mlx5_priv *priv = dev->data->dev_private;
4904
4905         mlx5_flow_list_flush(dev, &priv->flows, false);
4906         return 0;
4907 }
4908
4909 /**
4910  * Isolated mode.
4911  *
4912  * @see rte_flow_isolate()
4913  * @see rte_flow_ops
4914  */
4915 int
4916 mlx5_flow_isolate(struct rte_eth_dev *dev,
4917                   int enable,
4918                   struct rte_flow_error *error)
4919 {
4920         struct mlx5_priv *priv = dev->data->dev_private;
4921
4922         if (dev->data->dev_started) {
4923                 rte_flow_error_set(error, EBUSY,
4924                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4925                                    NULL,
4926                                    "port must be stopped first");
4927                 return -rte_errno;
4928         }
4929         priv->isolated = !!enable;
4930         if (enable)
4931                 dev->dev_ops = &mlx5_dev_ops_isolate;
4932         else
4933                 dev->dev_ops = &mlx5_dev_ops;
4934         return 0;
4935 }
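/*
 * Usage sketch (hypothetical, application side): isolated mode must be
 * requested while the port is stopped, e.g. right after configuration:
 *
 *     struct rte_flow_error error;
 *
 *     if (rte_flow_isolate(port_id, 1, &error))
 *             printf("cannot enter isolated mode: %s\n",
 *                    error.message ? error.message : "(no reason)");
 *
 * Once enabled, only explicitly created rte_flow rules receive traffic;
 * the PMD installs no default/control flows for the port.
 */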
4936
4937 /**
4938  * Query a flow.
4939  *
4940  * @see rte_flow_query()
4941  * @see rte_flow_ops
4942  */
4943 static int
4944 flow_drv_query(struct rte_eth_dev *dev,
4945                struct rte_flow *flow,
4946                const struct rte_flow_action *actions,
4947                void *data,
4948                struct rte_flow_error *error)
4949 {
4950         const struct mlx5_flow_driver_ops *fops;
4951         enum mlx5_flow_drv_type ftype = flow->drv_type;
4952
4953         MLX5_ASSERT(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
4954         fops = flow_get_drv_ops(ftype);
4955
4956         return fops->query(dev, flow, actions, data, error);
4957 }
4958
4959 /**
4960  * Query a flow.
4961  *
4962  * @see rte_flow_query()
4963  * @see rte_flow_ops
4964  */
4965 int
4966 mlx5_flow_query(struct rte_eth_dev *dev,
4967                 struct rte_flow *flow,
4968                 const struct rte_flow_action *actions,
4969                 void *data,
4970                 struct rte_flow_error *error)
4971 {
4972         int ret;
4973
4974         ret = flow_drv_query(dev, flow, actions, data, error);
4975         if (ret < 0)
4976                 return ret;
4977         return 0;
4978 }
4979
4980 /**
4981  * Convert a flow director filter to a generic flow.
4982  *
4983  * @param dev
4984  *   Pointer to Ethernet device.
4985  * @param fdir_filter
4986  *   Flow director filter to add.
4987  * @param attributes
4988  *   Generic flow parameters structure.
4989  *
4990  * @return
4991  *   0 on success, a negative errno value otherwise and rte_errno is set.
4992  */
4993 static int
4994 flow_fdir_filter_convert(struct rte_eth_dev *dev,
4995                          const struct rte_eth_fdir_filter *fdir_filter,
4996                          struct mlx5_fdir *attributes)
4997 {
4998         struct mlx5_priv *priv = dev->data->dev_private;
4999         const struct rte_eth_fdir_input *input = &fdir_filter->input;
5000         const struct rte_eth_fdir_masks *mask =
5001                 &dev->data->dev_conf.fdir_conf.mask;
5002
5003         /* Validate queue number. */
5004         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
5005                 DRV_LOG(ERR, "port %u invalid queue number %d",
5006                         dev->data->port_id, fdir_filter->action.rx_queue);
5007                 rte_errno = EINVAL;
5008                 return -rte_errno;
5009         }
5010         attributes->attr.ingress = 1;
5011         attributes->items[0] = (struct rte_flow_item) {
5012                 .type = RTE_FLOW_ITEM_TYPE_ETH,
5013                 .spec = &attributes->l2,
5014                 .mask = &attributes->l2_mask,
5015         };
5016         switch (fdir_filter->action.behavior) {
5017         case RTE_ETH_FDIR_ACCEPT:
5018                 attributes->actions[0] = (struct rte_flow_action){
5019                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
5020                         .conf = &attributes->queue,
5021                 };
5022                 break;
5023         case RTE_ETH_FDIR_REJECT:
5024                 attributes->actions[0] = (struct rte_flow_action){
5025                         .type = RTE_FLOW_ACTION_TYPE_DROP,
5026                 };
5027                 break;
5028         default:
5029                 DRV_LOG(ERR, "port %u invalid behavior %d",
5030                         dev->data->port_id,
5031                         fdir_filter->action.behavior);
5032                 rte_errno = ENOTSUP;
5033                 return -rte_errno;
5034         }
5035         attributes->queue.index = fdir_filter->action.rx_queue;
5036         /* Handle L3. */
5037         switch (fdir_filter->input.flow_type) {
5038         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
5039         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
5040         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
5041                 attributes->l3.ipv4.hdr = (struct rte_ipv4_hdr){
5042                         .src_addr = input->flow.ip4_flow.src_ip,
5043                         .dst_addr = input->flow.ip4_flow.dst_ip,
5044                         .time_to_live = input->flow.ip4_flow.ttl,
5045                         .type_of_service = input->flow.ip4_flow.tos,
5046                 };
5047                 attributes->l3_mask.ipv4.hdr = (struct rte_ipv4_hdr){
5048                         .src_addr = mask->ipv4_mask.src_ip,
5049                         .dst_addr = mask->ipv4_mask.dst_ip,
5050                         .time_to_live = mask->ipv4_mask.ttl,
5051                         .type_of_service = mask->ipv4_mask.tos,
5052                         .next_proto_id = mask->ipv4_mask.proto,
5053                 };
5054                 attributes->items[1] = (struct rte_flow_item){
5055                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
5056                         .spec = &attributes->l3,
5057                         .mask = &attributes->l3_mask,
5058                 };
5059                 break;
5060         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
5061         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
5062         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
5063                 attributes->l3.ipv6.hdr = (struct rte_ipv6_hdr){
5064                         .hop_limits = input->flow.ipv6_flow.hop_limits,
5065                         .proto = input->flow.ipv6_flow.proto,
5066                 };
5067
5068                 memcpy(attributes->l3.ipv6.hdr.src_addr,
5069                        input->flow.ipv6_flow.src_ip,
5070                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
5071                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
5072                        input->flow.ipv6_flow.dst_ip,
5073                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
5074                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
5075                        mask->ipv6_mask.src_ip,
5076                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
5077                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
5078                        mask->ipv6_mask.dst_ip,
5079                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
5080                 attributes->items[1] = (struct rte_flow_item){
5081                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
5082                         .spec = &attributes->l3,
5083                         .mask = &attributes->l3_mask,
5084                 };
5085                 break;
5086         default:
5087                 DRV_LOG(ERR, "port %u invalid flow type %d",
5088                         dev->data->port_id, fdir_filter->input.flow_type);
5089                 rte_errno = ENOTSUP;
5090                 return -rte_errno;
5091         }
5092         /* Handle L4. */
5093         switch (fdir_filter->input.flow_type) {
5094         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
5095                 attributes->l4.udp.hdr = (struct rte_udp_hdr){
5096                         .src_port = input->flow.udp4_flow.src_port,
5097                         .dst_port = input->flow.udp4_flow.dst_port,
5098                 };
5099                 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
5100                         .src_port = mask->src_port_mask,
5101                         .dst_port = mask->dst_port_mask,
5102                 };
5103                 attributes->items[2] = (struct rte_flow_item){
5104                         .type = RTE_FLOW_ITEM_TYPE_UDP,
5105                         .spec = &attributes->l4,
5106                         .mask = &attributes->l4_mask,
5107                 };
5108                 break;
5109         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
5110                 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
5111                         .src_port = input->flow.tcp4_flow.src_port,
5112                         .dst_port = input->flow.tcp4_flow.dst_port,
5113                 };
5114                 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
5115                         .src_port = mask->src_port_mask,
5116                         .dst_port = mask->dst_port_mask,
5117                 };
5118                 attributes->items[2] = (struct rte_flow_item){
5119                         .type = RTE_FLOW_ITEM_TYPE_TCP,
5120                         .spec = &attributes->l4,
5121                         .mask = &attributes->l4_mask,
5122                 };
5123                 break;
5124         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
5125                 attributes->l4.udp.hdr = (struct rte_udp_hdr){
5126                         .src_port = input->flow.udp6_flow.src_port,
5127                         .dst_port = input->flow.udp6_flow.dst_port,
5128                 };
5129                 attributes->l4_mask.udp.hdr = (struct rte_udp_hdr){
5130                         .src_port = mask->src_port_mask,
5131                         .dst_port = mask->dst_port_mask,
5132                 };
5133                 attributes->items[2] = (struct rte_flow_item){
5134                         .type = RTE_FLOW_ITEM_TYPE_UDP,
5135                         .spec = &attributes->l4,
5136                         .mask = &attributes->l4_mask,
5137                 };
5138                 break;
5139         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
5140                 attributes->l4.tcp.hdr = (struct rte_tcp_hdr){
5141                         .src_port = input->flow.tcp6_flow.src_port,
5142                         .dst_port = input->flow.tcp6_flow.dst_port,
5143                 };
5144                 attributes->l4_mask.tcp.hdr = (struct rte_tcp_hdr){
5145                         .src_port = mask->src_port_mask,
5146                         .dst_port = mask->dst_port_mask,
5147                 };
5148                 attributes->items[2] = (struct rte_flow_item){
5149                         .type = RTE_FLOW_ITEM_TYPE_TCP,
5150                         .spec = &attributes->l4,
5151                         .mask = &attributes->l4_mask,
5152                 };
5153                 break;
5154         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
5155         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
5156                 break;
5157         default:
5158                 DRV_LOG(ERR, "port %u invalid flow type %d",
5159                         dev->data->port_id, fdir_filter->input.flow_type);
5160                 rte_errno = ENOTSUP;
5161                 return -rte_errno;
5162         }
5163         return 0;
5164 }
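/*
 * Conversion example (hypothetical filter): an IPv4/UDP flow director
 * filter steering packets to queue 2, such as
 *
 *     struct rte_eth_fdir_filter f = {
 *             .input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *             .input.flow.udp4_flow.dst_port = rte_cpu_to_be_16(4789),
 *             .action.behavior = RTE_ETH_FDIR_ACCEPT,
 *             .action.rx_queue = 2,
 *     };
 *
 * is converted to items ETH / IPV4 / UDP / END with a QUEUE(2) action
 * in the resulting mlx5_fdir attributes.
 */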
5165
5166 #define FLOW_FDIR_CMP(f1, f2, fld) \
5167         memcmp(&(f1)->fld, &(f2)->fld, sizeof(f1->fld))
5168
5169 /**
5170  * Compare two FDIR flows. If items and actions are identical, the two flows are
5171  * regarded as the same.
5172  *
5173  * @param dev
5174  *   Pointer to Ethernet device.
5175  * @param f1
5176  *   FDIR flow to compare.
5177  * @param f2
5178  *   FDIR flow to compare.
5179  *
5180  * @return
5181  *   Zero on match, 1 otherwise.
5182  */
5183 static int
5184 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
5185 {
5186         if (FLOW_FDIR_CMP(f1, f2, attr) ||
5187             FLOW_FDIR_CMP(f1, f2, l2) ||
5188             FLOW_FDIR_CMP(f1, f2, l2_mask) ||
5189             FLOW_FDIR_CMP(f1, f2, l3) ||
5190             FLOW_FDIR_CMP(f1, f2, l3_mask) ||
5191             FLOW_FDIR_CMP(f1, f2, l4) ||
5192             FLOW_FDIR_CMP(f1, f2, l4_mask) ||
5193             FLOW_FDIR_CMP(f1, f2, actions[0].type))
5194                 return 1;
5195         if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
5196             FLOW_FDIR_CMP(f1, f2, queue))
5197                 return 1;
5198         return 0;
5199 }
5200
5201 /**
5202  * Search the device flow list to find a matching FDIR flow.
5203  *
5204  * @param dev
5205  *   Pointer to Ethernet device.
5206  * @param fdir_flow
5207  *   FDIR flow to lookup.
5208  *
5209  * @return
5210  *   Pointer to the flow if found, NULL otherwise.
5211  */
5212 static struct rte_flow *
5213 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
5214 {
5215         struct mlx5_priv *priv = dev->data->dev_private;
5216         struct rte_flow *flow = NULL;
5217         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5218
5219         MLX5_ASSERT(fdir_flow);
5220         LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
5221                 if (!flow_fdir_cmp(priv_fdir_flow->fdir, fdir_flow)) {
5222                         flow = priv_fdir_flow->flow;
5223                         DRV_LOG(DEBUG, "port %u found FDIR flow %p",
5224                                 dev->data->port_id, (void *)flow);
5225                         break;
5226                 }
5227         }
5228         return flow;
5229 }
5230
5231 /**
5232  * Add new flow director filter and store it in list.
5233  *
5234  * @param dev
5235  *   Pointer to Ethernet device.
5236  * @param fdir_filter
5237  *   Flow director filter to add.
5238  *
5239  * @return
5240  *   0 on success, a negative errno value otherwise and rte_errno is set.
5241  */
5242 static int
5243 flow_fdir_filter_add(struct rte_eth_dev *dev,
5244                      const struct rte_eth_fdir_filter *fdir_filter)
5245 {
5246         struct mlx5_priv *priv = dev->data->dev_private;
5247         struct mlx5_fdir *fdir_flow;
5248         struct rte_flow *flow;
5249         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5250         int ret;
5251
5252         fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
5253         if (!fdir_flow) {
5254                 rte_errno = ENOMEM;
5255                 return -rte_errno;
5256         }
5257         ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
5258         if (ret)
5259                 goto error;
5260         flow = flow_fdir_filter_lookup(dev, fdir_flow);
5261         if (flow) {
5262                 rte_errno = EEXIST;
5263                 goto error;
5264         }
5265         priv_fdir_flow = rte_zmalloc(__func__, sizeof(struct mlx5_fdir_flow),
5266                                      0);
5267         if (!priv_fdir_flow) {
5268                 rte_errno = ENOMEM;
5269                 goto error;
5270         }
5271         flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
5272                                 fdir_flow->items, fdir_flow->actions, true,
5273                                 NULL);
5274         if (!flow)
5275                 goto error;
5276         flow->fdir = 1;
5277         priv_fdir_flow->fdir = fdir_flow;
5278         priv_fdir_flow->flow = flow;
5279         LIST_INSERT_HEAD(&priv->fdir_flows, priv_fdir_flow, next);
5280         DRV_LOG(DEBUG, "port %u created FDIR flow %p",
5281                 dev->data->port_id, (void *)flow);
5282         return 0;
5283 error:
5284         rte_free(priv_fdir_flow);
5285         rte_free(fdir_flow);
5286         return -rte_errno;
5287 }
5288
5289 /**
5290  * Delete specific filter.
5291  *
5292  * @param dev
5293  *   Pointer to Ethernet device.
5294  * @param fdir_filter
5295  *   Filter to be deleted.
5296  *
5297  * @return
5298  *   0 on success, a negative errno value otherwise and rte_errno is set.
5299  */
5300 static int
5301 flow_fdir_filter_delete(struct rte_eth_dev *dev,
5302                         const struct rte_eth_fdir_filter *fdir_filter)
5303 {
5304         struct mlx5_priv *priv = dev->data->dev_private;
5305         struct rte_flow *flow;
5306         struct mlx5_fdir fdir_flow = {
5307                 .attr.group = 0,
5308         };
5309         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5310         int ret;
5311
5312         ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
5313         if (ret)
5314                 return -rte_errno;
5315         LIST_FOREACH(priv_fdir_flow, &priv->fdir_flows, next) {
5316                 /* Find the FDIR flow in the private list. */
5317                 if (!flow_fdir_cmp(priv_fdir_flow->fdir, &fdir_flow))
5318                         break;
5319         }
5320         if (!priv_fdir_flow)
5321                 return 0;
5322         LIST_REMOVE(priv_fdir_flow, next);
5323         flow = priv_fdir_flow->flow;
5324         /* The FDIR resource will be released after the flow is destroyed. */
5325         flow->fdir = 0;
5326         flow_list_destroy(dev, &priv->flows, flow);
5327         rte_free(priv_fdir_flow->fdir);
5328         rte_free(priv_fdir_flow);
5329         DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
5330                 dev->data->port_id, (void *)flow);
5331         return 0;
5332 }
5333
5334 /**
5335  * Update queue for specific filter.
5336  *
5337  * @param dev
5338  *   Pointer to Ethernet device.
5339  * @param fdir_filter
5340  *   Filter to be updated.
5341  *
5342  * @return
5343  *   0 on success, a negative errno value otherwise and rte_errno is set.
5344  */
5345 static int
5346 flow_fdir_filter_update(struct rte_eth_dev *dev,
5347                         const struct rte_eth_fdir_filter *fdir_filter)
5348 {
5349         int ret;
5350
5351         ret = flow_fdir_filter_delete(dev, fdir_filter);
5352         if (ret)
5353                 return ret;
5354         return flow_fdir_filter_add(dev, fdir_filter);
5355 }
5356
5357 /**
5358  * Flush all filters.
5359  *
5360  * @param dev
5361  *   Pointer to Ethernet device.
5362  */
5363 static void
5364 flow_fdir_filter_flush(struct rte_eth_dev *dev)
5365 {
5366         struct mlx5_priv *priv = dev->data->dev_private;
5367         struct mlx5_fdir_flow *priv_fdir_flow = NULL;
5368
5369         while (!LIST_EMPTY(&priv->fdir_flows)) {
5370                 priv_fdir_flow = LIST_FIRST(&priv->fdir_flows);
5371                 LIST_REMOVE(priv_fdir_flow, next);
5372                 priv_fdir_flow->flow->fdir = 0;
5373                 flow_list_destroy(dev, &priv->flows, priv_fdir_flow->flow);
5374                 rte_free(priv_fdir_flow->fdir);
5375                 rte_free(priv_fdir_flow);
5376         }
5377 }
5378
5379 /**
5380  * Get flow director information.
5381  *
5382  * @param dev
5383  *   Pointer to Ethernet device.
5384  * @param[out] fdir_info
5385  *   Resulting flow director information.
5386  */
5387 static void
5388 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
5389 {
5390         struct rte_eth_fdir_masks *mask =
5391                 &dev->data->dev_conf.fdir_conf.mask;
5392
5393         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
5394         fdir_info->guarant_spc = 0;
5395         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
5396         fdir_info->max_flexpayload = 0;
5397         fdir_info->flow_types_mask[0] = 0;
5398         fdir_info->flex_payload_unit = 0;
5399         fdir_info->max_flex_payload_segment_num = 0;
5400         fdir_info->flex_payload_limit = 0;
5401         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
5402 }
5403
5404 /**
5405  * Deal with flow director operations.
5406  *
5407  * @param dev
5408  *   Pointer to Ethernet device.
5409  * @param filter_op
5410  *   Operation to perform.
5411  * @param arg
5412  *   Pointer to operation-specific structure.
5413  *
5414  * @return
5415  *   0 on success, a negative errno value otherwise and rte_errno is set.
5416  */
5417 static int
5418 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
5419                     void *arg)
5420 {
5421         enum rte_fdir_mode fdir_mode =
5422                 dev->data->dev_conf.fdir_conf.mode;
5423
5424         if (filter_op == RTE_ETH_FILTER_NOP)
5425                 return 0;
5426         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
5427             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
5428                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
5429                         dev->data->port_id, fdir_mode);
5430                 rte_errno = EINVAL;
5431                 return -rte_errno;
5432         }
5433         switch (filter_op) {
5434         case RTE_ETH_FILTER_ADD:
5435                 return flow_fdir_filter_add(dev, arg);
5436         case RTE_ETH_FILTER_UPDATE:
5437                 return flow_fdir_filter_update(dev, arg);
5438         case RTE_ETH_FILTER_DELETE:
5439                 return flow_fdir_filter_delete(dev, arg);
5440         case RTE_ETH_FILTER_FLUSH:
5441                 flow_fdir_filter_flush(dev);
5442                 break;
5443         case RTE_ETH_FILTER_INFO:
5444                 flow_fdir_info_get(dev, arg);
5445                 break;
5446         default:
5447                 DRV_LOG(DEBUG, "port %u unknown operation %u",
5448                         dev->data->port_id, filter_op);
5449                 rte_errno = EINVAL;
5450                 return -rte_errno;
5451         }
5452         return 0;
5453 }
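
/*
 * A minimal usage sketch for the FDIR path above: adding a perfect-match
 * IPv4/UDP filter through the legacy filter-control API, which is routed
 * here via mlx5_dev_filter_ctrl() below. The port_id, addresses, ports and
 * queue are hypothetical placeholders.
 */
#if 0 /* illustrative sketch, not compiled */
	struct rte_eth_fdir_filter filter = {
		.soft_id = 1,
		.input = {
			.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
			.flow.udp4_flow = {
				/* Addresses and ports are in network order. */
				.ip = {
					.src_ip = rte_cpu_to_be_32(
						RTE_IPV4(192, 168, 0, 1)),
					.dst_ip = rte_cpu_to_be_32(
						RTE_IPV4(192, 168, 0, 2)),
				},
				.src_port = rte_cpu_to_be_16(1234),
				.dst_port = rte_cpu_to_be_16(5678),
			},
		},
		.action = {
			.rx_queue = 3,
			.behavior = RTE_ETH_FDIR_ACCEPT,
			.report_status = RTE_ETH_FDIR_NO_REPORT_STATUS,
		},
	};
	int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
					  RTE_ETH_FILTER_ADD, &filter);
#endif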
5454
5455 /**
5456  * Manage filter operations.
5457  *
5458  * @param dev
5459  *   Pointer to Ethernet device structure.
5460  * @param filter_type
5461  *   Filter type.
5462  * @param filter_op
5463  *   Operation to perform.
5464  * @param arg
5465  *   Pointer to operation-specific structure.
5466  *
5467  * @return
5468  *   0 on success, a negative errno value otherwise and rte_errno is set.
5469  */
5470 int
5471 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
5472                      enum rte_filter_type filter_type,
5473                      enum rte_filter_op filter_op,
5474                      void *arg)
5475 {
5476         switch (filter_type) {
5477         case RTE_ETH_FILTER_GENERIC:
5478                 if (filter_op != RTE_ETH_FILTER_GET) {
5479                         rte_errno = EINVAL;
5480                         return -rte_errno;
5481                 }
5482                 *(const void **)arg = &mlx5_flow_ops;
5483                 return 0;
5484         case RTE_ETH_FILTER_FDIR:
5485                 return flow_fdir_ctrl_func(dev, filter_op, arg);
5486         default:
5487                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
5488                         dev->data->port_id, filter_type);
5489                 rte_errno = ENOTSUP;
5490                 return -rte_errno;
5491         }
5492         return 0;
5493 }
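
/*
 * A minimal sketch of the RTE_ETH_FILTER_GENERIC branch above: this is the
 * path the rte_flow layer uses to retrieve the PMD flow callbacks. The
 * port_id is a hypothetical placeholder.
 */
#if 0 /* illustrative sketch, not compiled */
	const struct rte_flow_ops *ops = NULL;

	if (!rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
				     RTE_ETH_FILTER_GET, &ops))
		MLX5_ASSERT(ops == &mlx5_flow_ops);
#endif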
5494
5495 /**
5496  * Create the needed meter and suffix tables.
5497  *
5498  * @param[in] dev
5499  *   Pointer to Ethernet device.
5500  * @param[in] fm
5501  *   Pointer to the flow meter.
5502  *
5503  * @return
5504  *   Pointer to table set on success, NULL otherwise.
5505  */
5506 struct mlx5_meter_domains_infos *
5507 mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
5508                           const struct mlx5_flow_meter *fm)
5509 {
5510         const struct mlx5_flow_driver_ops *fops;
5511
5512         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5513         return fops->create_mtr_tbls(dev, fm);
5514 }
5515
5516 /**
5517  * Destroy the meter table set.
5518  *
5519  * @param[in] dev
5520  *   Pointer to Ethernet device.
5521  * @param[in] tbls
5522  *   Pointer to the meter table set.
5523  *
5524  * @return
5525  *   0 on success.
5526  */
5527 int
5528 mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
5529                            struct mlx5_meter_domains_infos *tbls)
5530 {
5531         const struct mlx5_flow_driver_ops *fops;
5532
5533         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5534         return fops->destroy_mtr_tbls(dev, tbls);
5535 }
5536
5537 /**
5538  * Create policer rules.
5539  *
5540  * @param[in] dev
5541  *   Pointer to Ethernet device.
5542  * @param[in] fm
5543  *   Pointer to flow meter structure.
5544  * @param[in] attr
5545  *   Pointer to flow attributes.
5546  *
5547  * @return
5548  *   0 on success, -1 otherwise.
5549  */
5550 int
5551 mlx5_flow_create_policer_rules(struct rte_eth_dev *dev,
5552                                struct mlx5_flow_meter *fm,
5553                                const struct rte_flow_attr *attr)
5554 {
5555         const struct mlx5_flow_driver_ops *fops;
5556
5557         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5558         return fops->create_policer_rules(dev, fm, attr);
5559 }
5560
5561 /**
5562  * Destroy policer rules.
5563  *
5564  * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] fm
5565  *   Pointer to flow meter structure.
5566  * @param[in] attr
5567  *   Pointer to flow attributes.
5568  *
5569  * @return
5570  *   0 on success, -1 otherwise.
5571  */
5572 int
5573 mlx5_flow_destroy_policer_rules(struct rte_eth_dev *dev,
5574                                 struct mlx5_flow_meter *fm,
5575                                 const struct rte_flow_attr *attr)
5576 {
5577         const struct mlx5_flow_driver_ops *fops;
5578
5579         fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5580         return fops->destroy_policer_rules(dev, fm, attr);
5581 }
5582
5583 /**
5584  * Allocate a counter.
5585  *
5586  * @param[in] dev
5587  *   Pointer to Ethernet device structure.
5588  *
5589  * @return
5590  *   Index to the allocated counter on success, 0 otherwise.
5591  */
5592 uint32_t
5593 mlx5_counter_alloc(struct rte_eth_dev *dev)
5594 {
5595         const struct mlx5_flow_driver_ops *fops;
5596         struct rte_flow_attr attr = { .transfer = 0 };
5597
5598         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5599                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5600                 return fops->counter_alloc(dev);
5601         }
5602         DRV_LOG(ERR,
5603                 "port %u counter allocation is not supported.",
5604                 dev->data->port_id);
5605         return 0;
5606 }
5607
5608 /**
5609  * Free a counter.
5610  *
5611  * @param[in] dev
5612  *   Pointer to Ethernet device structure.
5613  * @param[in] cnt
5614  *   Index to the counter to be freed.
5615  */
5616 void
5617 mlx5_counter_free(struct rte_eth_dev *dev, uint32_t cnt)
5618 {
5619         const struct mlx5_flow_driver_ops *fops;
5620         struct rte_flow_attr attr = { .transfer = 0 };
5621
5622         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5623                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5624                 fops->counter_free(dev, cnt);
5625                 return;
5626         }
5627         DRV_LOG(ERR,
5628                 "port %u counter free is not supported.",
5629                 dev->data->port_id);
5630 }
5631
5632 /**
5633  * Query counter statistics.
5634  *
5635  * @param[in] dev
5636  *   Pointer to Ethernet device structure.
5637  * @param[in] cnt
5638  *   Index to counter to query.
5639  * @param[in] clear
5640  *   Set to clear counter statistics.
5641  * @param[out] pkts
5642  *   Where to store the number of packets the counter hit.
5643  * @param[out] bytes
5644  *   Where to store the number of bytes the counter hit.
5645  *
5646  * @return
5647  *   0 on success, a negative errno value otherwise.
5648  */
5649 int
5650 mlx5_counter_query(struct rte_eth_dev *dev, uint32_t cnt,
5651                    bool clear, uint64_t *pkts, uint64_t *bytes)
5652 {
5653         const struct mlx5_flow_driver_ops *fops;
5654         struct rte_flow_attr attr = { .transfer = 0 };
5655
5656         if (flow_get_drv_type(dev, &attr) == MLX5_FLOW_TYPE_DV) {
5657                 fops = flow_get_drv_ops(MLX5_FLOW_TYPE_DV);
5658                 return fops->counter_query(dev, cnt, clear, pkts, bytes);
5659         }
5660         DRV_LOG(ERR,
5661                 "port %u counter query is not supported.",
5662                 dev->data->port_id);
5663         return -ENOTSUP;
5664 }
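
/*
 * A minimal lifecycle sketch for the three counter wrappers above, assuming
 * a DV-capable device; error handling is trimmed.
 */
#if 0 /* illustrative sketch, not compiled */
	uint64_t pkts = 0;
	uint64_t bytes = 0;
	uint32_t cnt = mlx5_counter_alloc(dev);

	if (cnt) {
		/* Read the statistics and clear them in the same call. */
		if (!mlx5_counter_query(dev, cnt, true, &pkts, &bytes))
			DRV_LOG(DEBUG, "counter %u: %" PRIu64 " packets, %"
				PRIu64 " bytes", cnt, pkts, bytes);
		mlx5_counter_free(dev, cnt);
	}
#endif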
5665
5666 #define MLX5_POOL_QUERY_FREQ_US 1000000
5667
5668 /**
5669  * Set the periodic procedure for triggering asynchronous batch queries for all
5670  * the counter pools.
5671  *
5672  * @param[in] sh
5673  *   Pointer to mlx5_ibv_shared object.
5674  */
5675 void
5676 mlx5_set_query_alarm(struct mlx5_ibv_shared *sh)
5677 {
5678         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(sh, 0, 0);
5679         uint32_t pools_n = rte_atomic16_read(&cont->n_valid);
5680         uint32_t us;
5681
5682         cont = MLX5_CNT_CONTAINER(sh, 1, 0);
5683         pools_n += rte_atomic16_read(&cont->n_valid);
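        /* Callers are expected to ensure at least one valid pool exists. */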
5684         us = MLX5_POOL_QUERY_FREQ_US / pools_n;
5685         DRV_LOG(DEBUG, "Set alarm for %u pools each %u us", pools_n, us);
5686         if (rte_eal_alarm_set(us, mlx5_flow_query_alarm, sh)) {
5687                 sh->cmng.query_thread_on = 0;
5688                 DRV_LOG(ERR, "Cannot reinitialize query alarm");
5689         } else {
5690                 sh->cmng.query_thread_on = 1;
5691         }
5692 }
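
/*
 * Worked example: with MLX5_POOL_QUERY_FREQ_US at 1000000 and, say, four
 * valid pools across both containers, the alarm fires every
 * 1000000 / 4 = 250000 us, so each pool is queried roughly once per second.
 */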
5693
5694 /**
5695  * The periodic procedure for triggering asynchronous batch queries for all the
5696  * counter pools. This function is expected to run in the host thread.
5697  *
5698  * @param[in] arg
5699  *   The parameter for the alarm process.
5700  */
5701 void
5702 mlx5_flow_query_alarm(void *arg)
5703 {
5704         struct mlx5_ibv_shared *sh = arg;
5705         struct mlx5_devx_obj *dcs;
5706         uint16_t offset;
5707         int ret;
5708         uint8_t batch = sh->cmng.batch;
5709         uint16_t pool_index = sh->cmng.pool_index;
5710         struct mlx5_pools_container *cont;
5711         struct mlx5_pools_container *mcont;
5712         struct mlx5_flow_counter_pool *pool;
5713
5714         if (sh->cmng.pending_queries >= MLX5_MAX_PENDING_QUERIES)
5715                 goto set_alarm;
5716 next_container:
5717         cont = MLX5_CNT_CONTAINER(sh, batch, 1);
5718         mcont = MLX5_CNT_CONTAINER(sh, batch, 0);
5719         /* Check if a resize was done and the container needs to be flipped. */
5720         if (cont != mcont) {
5721                 if (cont->pools) {
5722                         /* Clean the old container. */
5723                         rte_free(cont->pools);
5724                         memset(cont, 0, sizeof(*cont));
5725                 }
5726                 rte_cio_wmb();
5727                 /* Flip the host container. */
5728                 sh->cmng.mhi[batch] ^= (uint8_t)2;
5729                 cont = mcont;
5730         }
5731         if (!cont->pools) {
5732                 /* The case of two empty containers is unexpected. */
5733                 if (unlikely(batch != sh->cmng.batch))
5734                         goto set_alarm;
5735                 batch ^= 0x1;
5736                 pool_index = 0;
5737                 goto next_container;
5738         }
5739         pool = cont->pools[pool_index];
5740         if (pool->raw_hw)
5741                 /* There is a pool query in progress. */
5742                 goto set_alarm;
5743         pool->raw_hw =
5744                 LIST_FIRST(&sh->cmng.free_stat_raws);
5745         if (!pool->raw_hw)
5746                 /* No free counter statistics raw memory. */
5747                 goto set_alarm;
5748         dcs = (struct mlx5_devx_obj *)(uintptr_t)rte_atomic64_read
5749                                                               (&pool->a64_dcs);
5750         offset = batch ? 0 : dcs->id % MLX5_COUNTERS_PER_POOL;
5751         /*
5752          * Identify the counters released between the query trigger and the
5753          * query handling more efficiently. Counters released in this gap
5754          * period should wait for a new round of queries, as the newly
5755          * arrived packets will not be taken into account otherwise.
5756          */
5757         rte_atomic64_add(&pool->start_query_gen, 1);
5758         ret = mlx5_devx_cmd_flow_counter_query(dcs, 0, MLX5_COUNTERS_PER_POOL -
5759                                                offset, NULL, NULL,
5760                                                pool->raw_hw->mem_mng->dm->id,
5761                                                (void *)(uintptr_t)
5762                                                (pool->raw_hw->data + offset),
5763                                                sh->devx_comp,
5764                                                (uint64_t)(uintptr_t)pool);
5765         if (ret) {
5766                 rte_atomic64_sub(&pool->start_query_gen, 1);
5767                 DRV_LOG(ERR, "Failed to trigger asynchronous query for dcs ID"
5768                         " %d", pool->min_dcs->id);
5769                 pool->raw_hw = NULL;
5770                 goto set_alarm;
5771         }
5772         pool->raw_hw->min_dcs_id = dcs->id;
5773         LIST_REMOVE(pool->raw_hw, next);
5774         sh->cmng.pending_queries++;
5775         pool_index++;
5776         if (pool_index >= rte_atomic16_read(&cont->n_valid)) {
5777                 batch ^= 0x1;
5778                 pool_index = 0;
5779         }
5780 set_alarm:
5781         sh->cmng.batch = batch;
5782         sh->cmng.pool_index = pool_index;
5783         mlx5_set_query_alarm(sh);
5784 }
5785
5786 /**
5787  * Handler for the HW response carrying the ready values of an asynchronous
5788  * batch query. This function is expected to run in the host thread.
5789  *
5790  * @param[in] sh
5791  *   The pointer to the shared IB device context.
5792  * @param[in] async_id
5793  *   The Devx async ID.
5794  * @param[in] status
5795  *   The status of the completion.
5796  */
5797 void
5798 mlx5_flow_async_pool_query_handle(struct mlx5_ibv_shared *sh,
5799                                   uint64_t async_id, int status)
5800 {
5801         struct mlx5_flow_counter_pool *pool =
5802                 (struct mlx5_flow_counter_pool *)(uintptr_t)async_id;
5803         struct mlx5_counter_stats_raw *raw_to_free;
5804
5805         if (unlikely(status)) {
5806                 rte_atomic64_sub(&pool->start_query_gen, 1);
5807                 raw_to_free = pool->raw_hw;
5808         } else {
5809                 raw_to_free = pool->raw;
5810                 rte_spinlock_lock(&pool->sl);
5811                 pool->raw = pool->raw_hw;
5812                 rte_spinlock_unlock(&pool->sl);
5813                 MLX5_ASSERT(rte_atomic64_read(&pool->end_query_gen) + 1 ==
5814                             rte_atomic64_read(&pool->start_query_gen));
5815                 rte_atomic64_set(&pool->end_query_gen,
5816                                  rte_atomic64_read(&pool->start_query_gen));
5817                 /* Be sure the new raw counter data is updated in memory. */
5818                 rte_cio_wmb();
5819         }
5820         LIST_INSERT_HEAD(&sh->cmng.free_stat_raws, raw_to_free, next);
5821         pool->raw_hw = NULL;
5822         sh->cmng.pending_queries--;
5823 }
5824
5825 /**
5826  * Translate the rte_flow group index to HW table value.
5827  *
5828  * @param[in] attributes
5829  *   Pointer to flow attributes.
5830  * @param[in] external
5831  *   Whether the flow rule was created by a request external to the PMD.
5832  * @param[in] group
5833  *   rte_flow group index value.
5834  * @param[in] fdb_def_rule
5835  *   Whether the FDB default jump to table 1 is configured.
5836  * @param[out] table
5837  *   HW table value.
5838  * @param[out] error
5839  *   Pointer to error structure.
5840  *
5841  * @return
5842  *   0 on success, a negative errno value otherwise and rte_errno is set.
5843  */
5844 int
5845 mlx5_flow_group_to_table(const struct rte_flow_attr *attributes, bool external,
5846                          uint32_t group, bool fdb_def_rule, uint32_t *table,
5847                          struct rte_flow_error *error)
5848 {
5849         if (attributes->transfer && external && fdb_def_rule) {
5850                 if (group == UINT32_MAX)
5851                         return rte_flow_error_set
5852                                                 (error, EINVAL,
5853                                                  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
5854                                                  NULL,
5855                                                  "group index not supported");
5856                 *table = group + 1;
5857         } else {
5858                 *table = group;
5859         }
5860         return 0;
5861 }
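
/*
 * Worked example for the mapping above, with hypothetical values: an
 * external transfer rule with the FDB default rule enabled maps group N to
 * HW table N + 1, otherwise the group index is used verbatim.
 */
#if 0 /* illustrative sketch, not compiled */
	struct rte_flow_attr attr = { .group = 5, .transfer = 1 };
	struct rte_flow_error err;
	uint32_t table = 0;

	mlx5_flow_group_to_table(&attr, true, attr.group, true, &table, &err);
	/* table == 6 here; with fdb_def_rule == false it would stay 5. */
#endif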
5862
5863 /**
5864  * Discover availability of metadata reg_c's.
5865  *
5866  * Iteratively use test flows to check availability.
5867  *
5868  * @param[in] dev
5869  *   Pointer to the Ethernet device structure.
5870  *
5871  * @return
5872  *   0 on success, a negative errno value otherwise and rte_errno is set.
5873  */
5874 int
5875 mlx5_flow_discover_mreg_c(struct rte_eth_dev *dev)
5876 {
5877         struct mlx5_priv *priv = dev->data->dev_private;
5878         struct mlx5_dev_config *config = &priv->config;
5879         enum modify_reg idx;
5880         int n = 0;
5881
5882         /* reg_c[0] and reg_c[1] are reserved. */
5883         config->flow_mreg_c[n++] = REG_C_0;
5884         config->flow_mreg_c[n++] = REG_C_1;
5885         /* Discover availability of other reg_c's. */
5886         for (idx = REG_C_2; idx <= REG_C_7; ++idx) {
5887                 struct rte_flow_attr attr = {
5888                         .group = MLX5_FLOW_MREG_CP_TABLE_GROUP,
5889                         .priority = MLX5_FLOW_PRIO_RSVD,
5890                         .ingress = 1,
5891                 };
5892                 struct rte_flow_item items[] = {
5893                         [0] = {
5894                                 .type = RTE_FLOW_ITEM_TYPE_END,
5895                         },
5896                 };
5897                 struct rte_flow_action actions[] = {
5898                         [0] = {
5899                                 .type = MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
5900                                 .conf = &(struct mlx5_flow_action_copy_mreg){
5901                                         .src = REG_C_1,
5902                                         .dst = idx,
5903                                 },
5904                         },
5905                         [1] = {
5906                                 .type = RTE_FLOW_ACTION_TYPE_JUMP,
5907                                 .conf = &(struct rte_flow_action_jump){
5908                                         .group = MLX5_FLOW_MREG_ACT_TABLE_GROUP,
5909                                 },
5910                         },
5911                         [2] = {
5912                                 .type = RTE_FLOW_ACTION_TYPE_END,
5913                         },
5914                 };
5915                 struct rte_flow *flow;
5916                 struct rte_flow_error error;
5917
5918                 if (!config->dv_flow_en)
5919                         break;
5920                 /* Create an internal flow; validation skips the copy action. */
5921                 flow = flow_list_create(dev, NULL, &attr, items,
5922                                         actions, false, &error);
5923                 if (!flow)
5924                         continue;
5925                 if (dev->data->dev_started || !flow_drv_apply(dev, flow, NULL))
5926                         config->flow_mreg_c[n++] = idx;
5927                 flow_list_destroy(dev, NULL, flow);
5928         }
5929         for (; n < MLX5_MREG_C_NUM; ++n)
5930                 config->flow_mreg_c[n] = REG_NONE;
5931         return 0;
5932 }
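
/*
 * Illustrative outcome (assuming MLX5_MREG_C_NUM == 8): on a device where
 * only reg_c[2] and reg_c[3] pass the test flows, the discovery above
 * leaves:
 *   flow_mreg_c[] = { REG_C_0, REG_C_1, REG_C_2, REG_C_3,
 *                     REG_NONE, REG_NONE, REG_NONE, REG_NONE };
 */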
5933
5934 /**
5935  * Dump flow raw HW data to a file.
5936  *
5937  * @param[in] dev
5938  *   Pointer to the Ethernet device.
5939  * @param[in] file
5940  *   A pointer to a file for output.
5941  * @param[out] error
5942  *   Perform verbose error reporting if not NULL. PMDs initialize this
5943  *   structure in case of error only.
5944  * @return
5945  *   0 on success, a negative errno value otherwise.
5946  */
5947 int
5948 mlx5_flow_dev_dump(struct rte_eth_dev *dev,
5949                    FILE *file,
5950                    struct rte_flow_error *error __rte_unused)
5951 {
5952         struct mlx5_priv *priv = dev->data->dev_private;
5953         struct mlx5_ibv_shared *sh = priv->sh;
5954
5955         return mlx5_devx_cmd_flow_dump(sh->fdb_domain, sh->rx_domain,
5956                                        sh->tx_domain, file);
5957 }
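
/*
 * A minimal usage sketch: applications reach the callback above through the
 * generic rte_flow_dev_dump() API. The port_id is a hypothetical
 * placeholder.
 */
#if 0 /* illustrative sketch, not compiled */
	struct rte_flow_error error = { 0 };

	if (rte_flow_dev_dump(port_id, stdout, &error))
		fprintf(stderr, "flow dump failed: %s\n",
			error.message ? error.message : "(no message)");
#endif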