/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
/* Dev ops structures defined in mlx5.c. */
extern const struct eth_dev_ops mlx5_dev_ops;
extern const struct eth_dev_ops mlx5_dev_ops_isolate;

/** Device flow drivers. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
extern const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops;
#endif
extern const struct mlx5_flow_driver_ops mlx5_flow_tcf_drv_ops;
extern const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops;

const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops;

const struct mlx5_flow_driver_ops *flow_drv_ops[] = {
        [MLX5_FLOW_TYPE_MIN] = &mlx5_flow_null_drv_ops,
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
        [MLX5_FLOW_TYPE_DV] = &mlx5_flow_dv_drv_ops,
#endif
        [MLX5_FLOW_TYPE_TCF] = &mlx5_flow_tcf_drv_ops,
        [MLX5_FLOW_TYPE_VERBS] = &mlx5_flow_verbs_drv_ops,
        [MLX5_FLOW_TYPE_MAX] = &mlx5_flow_null_drv_ops
};
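
/*
 * Illustrative sketch (hypothetical helper, not part of this excerpt): the
 * table above is meant to be indexed directly by the driver type enum, e.g.:
 *
 *	static const struct mlx5_flow_driver_ops *
 *	flow_get_drv_ops(enum mlx5_flow_drv_type type)
 *	{
 *		assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
 *		return flow_drv_ops[type];
 *	}
 *
 * The MIN/MAX slots point to mlx5_flow_null_drv_ops so that an out-of-range
 * type degrades to no-op callbacks instead of a NULL dereference.
 */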

enum mlx5_expansion {
        MLX5_EXPANSION_ROOT,
        MLX5_EXPANSION_ROOT_OUTER,
        MLX5_EXPANSION_ROOT_ETH_VLAN,
        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_ETH,
        MLX5_EXPANSION_OUTER_ETH_VLAN,
        MLX5_EXPANSION_OUTER_VLAN,
        MLX5_EXPANSION_OUTER_IPV4,
        MLX5_EXPANSION_OUTER_IPV4_UDP,
        MLX5_EXPANSION_OUTER_IPV4_TCP,
        MLX5_EXPANSION_OUTER_IPV6,
        MLX5_EXPANSION_OUTER_IPV6_UDP,
        MLX5_EXPANSION_OUTER_IPV6_TCP,
        MLX5_EXPANSION_VXLAN,
        MLX5_EXPANSION_VXLAN_GPE,
        MLX5_EXPANSION_GRE,
        MLX5_EXPANSION_MPLS,
        MLX5_EXPANSION_ETH,
        MLX5_EXPANSION_ETH_VLAN,
        MLX5_EXPANSION_VLAN,
        MLX5_EXPANSION_IPV4,
        MLX5_EXPANSION_IPV4_UDP,
        MLX5_EXPANSION_IPV4_TCP,
        MLX5_EXPANSION_IPV6,
        MLX5_EXPANSION_IPV6_UDP,
        MLX5_EXPANSION_IPV6_TCP,
};

/** Supported expansion of items. */
static const struct rte_flow_expand_node mlx5_support_expansion[] = {
        [MLX5_EXPANSION_ROOT] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                 MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH,
                                                 MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_ETH_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_END,
        },
        [MLX5_EXPANSION_OUTER_ETH] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6,
                                                 MLX5_EXPANSION_MPLS),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .rss_types = 0,
        },
        [MLX5_EXPANSION_OUTER_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_OUTER_IPV4,
                                                 MLX5_EXPANSION_OUTER_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_OUTER_IPV4] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV4_UDP,
                         MLX5_EXPANSION_OUTER_IPV4_TCP,
                         MLX5_EXPANSION_GRE),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV4_UDP] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_OUTER_IPV6] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT
                        (MLX5_EXPANSION_OUTER_IPV6_UDP,
                         MLX5_EXPANSION_OUTER_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_OUTER_IPV6_UDP] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VXLAN,
                                                 MLX5_EXPANSION_VXLAN_GPE),
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_OUTER_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
        [MLX5_EXPANSION_VXLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
        },
        [MLX5_EXPANSION_VXLAN_GPE] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_ETH,
                                                 MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VXLAN_GPE,
        },
        [MLX5_EXPANSION_GRE] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4),
                .type = RTE_FLOW_ITEM_TYPE_GRE,
        },
        [MLX5_EXPANSION_MPLS] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
        },
        [MLX5_EXPANSION_ETH] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_ETH_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_VLAN),
                .type = RTE_FLOW_ITEM_TYPE_ETH,
        },
        [MLX5_EXPANSION_VLAN] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4,
                                                 MLX5_EXPANSION_IPV6),
                .type = RTE_FLOW_ITEM_TYPE_VLAN,
        },
        [MLX5_EXPANSION_IPV4] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV4_UDP,
                                                 MLX5_EXPANSION_IPV4_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .rss_types = ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
                        ETH_RSS_NONFRAG_IPV4_OTHER,
        },
        [MLX5_EXPANSION_IPV4_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_UDP,
        },
        [MLX5_EXPANSION_IPV4_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV4_TCP,
        },
        [MLX5_EXPANSION_IPV6] = {
                .next = RTE_FLOW_EXPAND_RSS_NEXT(MLX5_EXPANSION_IPV6_UDP,
                                                 MLX5_EXPANSION_IPV6_TCP),
                .type = RTE_FLOW_ITEM_TYPE_IPV6,
                .rss_types = ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
                        ETH_RSS_NONFRAG_IPV6_OTHER,
        },
        [MLX5_EXPANSION_IPV6_UDP] = {
                .type = RTE_FLOW_ITEM_TYPE_UDP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_UDP,
        },
        [MLX5_EXPANSION_IPV6_TCP] = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .rss_types = ETH_RSS_NONFRAG_IPV6_TCP,
        },
};
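
/*
 * Usage sketch (assumption: the call site lives in the flow creation path,
 * outside this excerpt): the graph above is handed to rte_flow_expand_rss()
 * together with the user pattern and the requested RSS types, e.g.:
 *
 *	ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
 *				  items, rss->types,
 *				  mlx5_support_expansion,
 *				  MLX5_EXPANSION_ROOT);
 *
 * Each returned entry is the pattern completed down one branch of the graph
 * (e.g. ETH -> IPV4 -> UDP), so a single RSS rule may be split into several
 * device flows, one per hashable layer combination.
 */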

static const struct rte_flow_ops mlx5_flow_ops = {
        .validate = mlx5_flow_validate,
        .create = mlx5_flow_create,
        .destroy = mlx5_flow_destroy,
        .flush = mlx5_flow_flush,
        .isolate = mlx5_flow_isolate,
        .query = mlx5_flow_query,
};

/* Convert FDIR request to Generic flow. */
struct mlx5_fdir {
        struct rte_flow_attr attr;
        struct rte_flow_item items[4];
        struct rte_flow_item_eth l2;
        struct rte_flow_item_eth l2_mask;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3;
        union {
                struct rte_flow_item_ipv4 ipv4;
                struct rte_flow_item_ipv6 ipv6;
        } l3_mask;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4;
        union {
                struct rte_flow_item_udp udp;
                struct rte_flow_item_tcp tcp;
        } l4_mask;
        struct rte_flow_action actions[2];
        struct rte_flow_action_queue queue;
};

/* Map of Verbs to Flow priority with 8 Verbs priorities. */
static const uint32_t priority_map_3[][MLX5_PRIORITY_MAP_MAX] = {
        { 0, 1, 2 }, { 2, 3, 4 }, { 5, 6, 7 },
};

/* Map of Verbs to Flow priority with 16 Verbs priorities. */
static const uint32_t priority_map_5[][MLX5_PRIORITY_MAP_MAX] = {
        { 0, 1, 2 }, { 3, 4, 5 }, { 6, 7, 8 },
        { 9, 10, 11 }, { 12, 13, 14 },
};

/* Tunnel information. */
struct mlx5_flow_tunnel_info {
        uint64_t tunnel; /**< Tunnel bit (see MLX5_FLOW_*). */
        uint32_t ptype; /**< Tunnel Ptype (see RTE_PTYPE_*). */
};

static struct mlx5_flow_tunnel_info tunnels_info[] = {
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_VXLAN_GPE,
                .ptype = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_GRE,
                .ptype = RTE_PTYPE_TUNNEL_GRE,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS | MLX5_FLOW_LAYER_OUTER_L4_UDP,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_UDP | RTE_PTYPE_L4_UDP,
        },
        {
                .tunnel = MLX5_FLOW_LAYER_MPLS,
                .ptype = RTE_PTYPE_TUNNEL_MPLS_IN_GRE,
        },
};

/**
 * Discover the maximum number of flow priorities available.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 *
 * @return
 *   Number of supported flow priorities on success, a negative errno
 *   value otherwise and rte_errno is set.
 */
int
mlx5_flow_discover_priorities(struct rte_eth_dev *dev)
{
        struct {
                struct ibv_flow_attr attr;
                struct ibv_flow_spec_eth eth;
                struct ibv_flow_spec_action_drop drop;
        } flow_attr = {
                .attr = {
                        .num_of_specs = 2,
                },
                .eth = {
                        .type = IBV_FLOW_SPEC_ETH,
                        .size = sizeof(struct ibv_flow_spec_eth),
                },
                .drop = {
                        .size = sizeof(struct ibv_flow_spec_action_drop),
                        .type = IBV_FLOW_SPEC_ACTION_DROP,
                },
        };
        struct ibv_flow *flow;
        struct mlx5_hrxq *drop = mlx5_hrxq_drop_new(dev);
        uint16_t vprio[] = { 8, 16 };
        int i;
        int priority = 0;

        if (!drop) {
                rte_errno = ENOTSUP;
                return -rte_errno;
        }
        for (i = 0; i != RTE_DIM(vprio); i++) {
                flow_attr.attr.priority = vprio[i] - 1;
                flow = mlx5_glue->create_flow(drop->qp, &flow_attr.attr);
                if (!flow)
                        break;
                claim_zero(mlx5_glue->destroy_flow(flow));
                priority = vprio[i];
        }
        switch (priority) {
        case 8:
                priority = RTE_DIM(priority_map_3);
                break;
        case 16:
                priority = RTE_DIM(priority_map_5);
                break;
        default:
                rte_errno = ENOTSUP;
                DRV_LOG(ERR,
                        "port %u verbs maximum priority: %d expected 8/16",
                        dev->data->port_id, vprio[i]);
                /* Release the drop queue on the error path as well. */
                mlx5_hrxq_drop_release(dev);
                return -rte_errno;
        }
        mlx5_hrxq_drop_release(dev);
        DRV_LOG(INFO, "port %u flow maximum priority: %d",
                dev->data->port_id, priority);
        return priority;
}
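
/*
 * Probe-time usage sketch (assumption: taken from the device init path in
 * mlx5.c, not shown in this excerpt): the returned count is cached so that
 * mlx5_flow_adjust_priority() below can pick the matching map:
 *
 *	err = mlx5_flow_discover_priorities(eth_dev);
 *	if (err < 0)
 *		goto error;
 *	priv->config.flow_prio = err;
 */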

/**
 * Adjust flow priority based on the highest layer and the request priority.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] priority
 *   The rule base priority.
 * @param[in] subpriority
 *   The priority based on the items.
 *
 * @return
 *   The new priority.
 */
uint32_t
mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
                          uint32_t subpriority)
{
        uint32_t res = 0;
        struct priv *priv = dev->data->dev_private;

        switch (priv->config.flow_prio) {
        case RTE_DIM(priority_map_3):
                res = priority_map_3[priority][subpriority];
                break;
        case RTE_DIM(priority_map_5):
                res = priority_map_5[priority][subpriority];
                break;
        }
        return res;
}
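
/*
 * Worked example: on a device with 16 Verbs priorities (flow_prio == 5), a
 * rule of base priority 1 whose items yield subpriority 2 is placed at Verbs
 * priority priority_map_5[1][2] == 5; the same rule on an 8-priority device
 * maps to priority_map_3[1][2] == 4.
 */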

/**
 * Verify the @p item specifications (spec, last, mask) are compatible with the
 * NIC capabilities.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] mask
 *   @p item->mask or flow default bit-masks.
 * @param[in] nic_mask
 *   Bit-masks covering supported fields by the NIC to compare with user mask.
 * @param[in] size
 *   Bit-masks size in bytes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_item_acceptable(const struct rte_flow_item *item,
                          const uint8_t *mask,
                          const uint8_t *nic_mask,
                          unsigned int size,
                          struct rte_flow_error *error)
{
        unsigned int i;

        assert(nic_mask);
        for (i = 0; i < size; ++i)
                if ((nic_mask[i] | mask[i]) != nic_mask[i])
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "mask enables unsupported"
                                                  " bits");
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "mask/last without a spec is not"
                                          " supported");
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                int ret;

                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & mask[i];
                        last[i] = ((const uint8_t *)item->last)[i] & mask[i];
                }
                ret = memcmp(spec, last, size);
                if (ret != 0)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  item,
                                                  "range is not valid");
        }
        return 0;
}
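
/*
 * Example of the range check above: a VLAN item with spec->tci == 1,
 * last->tci == 5 and mask->tci == RTE_BE16(0x0fff) fails with "range is not
 * valid", since the masked spec and last bytes differ; only ranges that
 * collapse to a single masked value pass this check.
 */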

/**
 * Adjust the hash fields according to the @p flow information.
 *
 * @param[in] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] tunnel
 *   1 when the hash field is for a tunnel item.
 * @param[in] layer_types
 *   ETH_RSS_* types.
 * @param[in] hash_fields
 *   Item hash fields.
 *
 * @return
 *   The hash fields that should be used.
 */
uint64_t
mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow,
                            int tunnel __rte_unused, uint64_t layer_types,
                            uint64_t hash_fields)
{
        struct rte_flow *flow = dev_flow->flow;
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        int rss_request_inner = flow->rss.level >= 2;

        /* Check RSS hash level for tunnel. */
        if (tunnel && rss_request_inner)
                hash_fields |= IBV_RX_HASH_INNER;
        else if (tunnel || rss_request_inner)
                return 0;
#endif
        /* Check if requested layer matches RSS hash fields. */
        if (!(flow->rss.types & layer_types))
                return 0;
        return hash_fields;
}
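
/*
 * Example (assuming HAVE_IBV_DEVICE_TUNNEL_SUPPORT): for a VXLAN flow
 * created with rss.level == 2, the inner IPv4 fields come back as
 * IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_INNER, while a
 * tunnel item combined with rss.level <= 1 (outer RSS requested) yields 0,
 * so the outer header is hashed instead.
 */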

/**
 * Look up and set the ptype in the Rx queue data. Only a single tunnel ptype
 * can be used; if several tunnel rules are attached to this queue, the tunnel
 * ptype is cleared.
 *
 * @param rxq_ctrl
 *   Rx queue to update.
 */
static void
flow_rxq_tunnel_ptype_update(struct mlx5_rxq_ctrl *rxq_ctrl)
{
        unsigned int i;
        uint32_t tunnel_ptype = 0;

        /* Look up for the ptype to use. */
        for (i = 0; i != MLX5_FLOW_TUNNEL; ++i) {
                if (!rxq_ctrl->flow_tunnels_n[i])
                        continue;
                if (!tunnel_ptype) {
                        tunnel_ptype = tunnels_info[i].ptype;
                } else {
                        tunnel_ptype = 0;
                        break;
                }
        }
        rxq_ctrl->rxq.tunnel = tunnel_ptype;
}
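
/*
 * Example: a queue referenced only by VXLAN rules reports
 * RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP in rxq.tunnel; once a GRE rule
 * is added to the same queue, the loop above finds two non-zero counters and
 * clears rxq.tunnel, as only one tunnel ptype can be reported per queue.
 */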

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) according to the device
 * flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] dev_flow
 *   Pointer to device flow structure.
 */
static void
flow_drv_rxq_flags_set(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
        const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;

        for (i = 0; i != flow->rss.queue_num; ++i) {
                int idx = (*flow->queue)[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (mark) {
                        rxq_ctrl->rxq.mark = 1;
                        rxq_ctrl->flow_mark_n++;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Increase the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_flow->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]++;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Set the Rx queue flags (Mark/Flag and Tunnel Ptypes) for a flow.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] flow
 *   Pointer to flow structure.
 */
static void
flow_rxq_flags_set(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next)
                flow_drv_rxq_flags_set(dev, dev_flow);
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * device flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] dev_flow
 *   Pointer to the device flow.
 */
static void
flow_drv_rxq_flags_trim(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow = dev_flow->flow;
        const int mark = !!(flow->actions &
                            (MLX5_FLOW_ACTION_FLAG | MLX5_FLOW_ACTION_MARK));
        const int tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
        unsigned int i;

        assert(dev->data->dev_started);
        for (i = 0; i != flow->rss.queue_num; ++i) {
                int idx = (*flow->queue)[i];
                struct mlx5_rxq_ctrl *rxq_ctrl =
                        container_of((*priv->rxqs)[idx],
                                     struct mlx5_rxq_ctrl, rxq);

                if (mark) {
                        rxq_ctrl->flow_mark_n--;
                        rxq_ctrl->rxq.mark = !!rxq_ctrl->flow_mark_n;
                }
                if (tunnel) {
                        unsigned int j;

                        /* Decrease the counter matching the flow. */
                        for (j = 0; j != MLX5_FLOW_TUNNEL; ++j) {
                                if ((tunnels_info[j].tunnel &
                                     dev_flow->layers) ==
                                    tunnels_info[j].tunnel) {
                                        rxq_ctrl->flow_tunnels_n[j]--;
                                        break;
                                }
                        }
                        flow_rxq_tunnel_ptype_update(rxq_ctrl);
                }
        }
}

/**
 * Clear the Rx queue flags (Mark/Flag and Tunnel Ptype) associated with the
 * @p flow if no other flow uses it with the same kind of request.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the flow.
 */
static void
flow_rxq_flags_trim(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        LIST_FOREACH(dev_flow, &flow->dev_flows, next)
                flow_drv_rxq_flags_trim(dev, dev_flow);
}

/**
 * Clear the Mark/Flag and Tunnel ptype information in all Rx queues.
 *
 * @param dev
 *   Pointer to Ethernet device.
 */
static void
flow_rxq_flags_clear(struct rte_eth_dev *dev)
{
        struct priv *priv = dev->data->dev_private;
        unsigned int i;

        for (i = 0; i != priv->rxqs_n; ++i) {
                struct mlx5_rxq_ctrl *rxq_ctrl;
                unsigned int j;

                if (!(*priv->rxqs)[i])
                        continue;
                rxq_ctrl = container_of((*priv->rxqs)[i],
                                        struct mlx5_rxq_ctrl, rxq);
                rxq_ctrl->flow_mark_n = 0;
                rxq_ctrl->rxq.mark = 0;
                for (j = 0; j != MLX5_FLOW_TUNNEL; ++j)
                        rxq_ctrl->flow_tunnels_n[j] = 0;
                rxq_ctrl->rxq.tunnel = 0;
        }
}

/**
 * Validate the flag action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_flag(uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't mark and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 flag"
                                          " actions in same flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "flag action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the mark action.
 *
 * @param[in] action
 *   Pointer to the mark action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
                               uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        const struct rte_flow_action_mark *mark = action->conf;

        if (!mark)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          action,
                                          "configuration cannot be null");
        if (mark->id >= MLX5_FLOW_MARK_MAX)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &mark->id,
                                          "mark id must be in 0 <= id < "
                                          RTE_STR(MLX5_FLOW_MARK_MAX));
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and mark in same flow");
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't flag and mark in same flow");
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 mark actions in same"
                                          " flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "mark action not supported for "
                                          "egress");
        return 0;
}
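
/*
 * Illustrative action list accepted by the checks above (hypothetical
 * values; MARK cannot be combined with FLAG, DROP or a second MARK):
 *
 *	struct rte_flow_action_mark mark = { .id = 42 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */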

/**
 * Validate the drop action.
 *
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_drop(uint64_t action_flags,
                               const struct rte_flow_attr *attr,
                               struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_FLAG)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and flag in same flow");
        if (action_flags & MLX5_FLOW_ACTION_MARK)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and mark in same flow");
        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "drop action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the queue action.
 *
 * @param[in] action
 *   Pointer to the queue action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
                                uint64_t action_flags,
                                struct rte_eth_dev *dev,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_action_queue *queue = action->conf;

        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions in"
                                          " same flow");
        if (queue->index >= priv->rxqs_n)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue index out of range");
        if (!(*priv->rxqs)[queue->index])
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &queue->index,
                                          "queue is not configured");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "queue action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the rss action.
 *
 * @param[in] action
 *   Pointer to the rss action.
 * @param[in] action_flags
 *   Bit-fields that hold the actions detected until now.
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
                              uint64_t action_flags,
                              struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attr,
                              struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        const struct rte_flow_action_rss *rss = action->conf;
        unsigned int i;

        if (action_flags & MLX5_FLOW_FATE_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have 2 fate actions"
                                          " in same flow");
        if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT &&
            rss->func != RTE_ETH_HASH_FUNCTION_TOEPLITZ)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->func,
                                          "RSS hash function not supported");
#ifdef HAVE_IBV_DEVICE_TUNNEL_SUPPORT
        if (rss->level > 2)
#else
        if (rss->level > 1)
#endif
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->level,
                                          "tunnel RSS is not supported");
        /* Allow RSS key_len 0 in case of NULL (default) RSS key. */
        if (rss->key_len == 0 && rss->key != NULL)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key length 0");
        if (rss->key_len > 0 && rss->key_len < MLX5_RSS_HASH_KEY_LEN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too small");
        if (rss->key_len > MLX5_RSS_HASH_KEY_LEN)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->key_len,
                                          "RSS hash key too large");
        if (rss->queue_num > priv->config.ind_table_max_size)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->queue_num,
                                          "number of queues too large");
        if (rss->types & MLX5_RSS_HF_MASK)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                          &rss->types,
                                          "some RSS protocols are not"
                                          " supported");
        for (i = 0; i != rss->queue_num; ++i) {
                if (!(*priv->rxqs)[rss->queue[i]])
                        return rte_flow_error_set
                                (error, EINVAL, RTE_FLOW_ERROR_TYPE_ACTION_CONF,
                                 &rss->queue[i], "queue is not configured");
        }
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "rss action not supported for "
                                          "egress");
        return 0;
}
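
/*
 * Illustrative RSS action satisfying the constraints above (hypothetical
 * queue ids; key_len must be 0 with a NULL key, or exactly
 * MLX5_RSS_HASH_KEY_LEN with an explicit one):
 *
 *	static const uint16_t queues[] = { 0, 1 };
 *	struct rte_flow_action_rss rss = {
 *		.func = RTE_ETH_HASH_FUNCTION_DEFAULT,
 *		.level = 1,
 *		.types = ETH_RSS_IP,
 *		.key_len = 0,
 *		.key = NULL,
 *		.queue_num = 2,
 *		.queue = queues,
 *	};
 */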

/**
 * Validate the count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attr
 *   Attributes of flow that includes this action.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_action_count(struct rte_eth_dev *dev __rte_unused,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "count action not supported for "
                                          "egress");
        return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
                              const struct rte_flow_attr *attributes,
                              struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL, "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL, "priority out of range");
        if (attributes->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, NULL,
                                          "egress is not supported");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL, "transfer is not supported");
        if (!attributes->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "ingress attribute is mandatory");
        return 0;
}
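
/*
 * Example: the only attribute combination accepted here is plain ingress in
 * group 0 with an in-range (or reserved) priority, e.g.:
 *
 *	struct rte_flow_attr attr = {
 *		.group = 0,
 *		.priority = 0,
 *		.ingress = 1,
 *	};
 *
 * Egress, transfer and non-zero groups are all rejected with ENOTSUP.
 */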

/**
 * Validate Ethernet item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
                            uint64_t item_flags,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *mask = item->mask;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        int ret;
        int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t ethm = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                       MLX5_FLOW_LAYER_OUTER_L2;

        if (item_flags & ethm)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L2 layers not supported");
        if (!mask)
                mask = &rte_flow_item_eth_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_eth),
                                        error);
        return ret;
}

/**
 * Validate VLAN item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
                             uint64_t item_flags,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        uint16_t vlan_tag = 0;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        int ret;
        const uint64_t l34m = tunnel ? (MLX5_FLOW_LAYER_INNER_L3 |
                                        MLX5_FLOW_LAYER_INNER_L4) :
                                       (MLX5_FLOW_LAYER_OUTER_L3 |
                                        MLX5_FLOW_LAYER_OUTER_L4);
        const uint64_t vlanm = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                        MLX5_FLOW_LAYER_OUTER_VLAN;

        if (item_flags & vlanm)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple VLAN layers not supported");
        else if ((item_flags & l34m) != 0)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L2 layer cannot follow L3/L4 layer");
        if (!mask)
                mask = &rte_flow_item_vlan_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_vlan),
                                        error);
        if (ret)
                return ret;
        if (spec) {
                vlan_tag = spec->tci;
                vlan_tag &= mask->tci;
        }
        /*
         * From verbs perspective an empty VLAN is equivalent
         * to a packet without VLAN layer.
         */
        if (!vlan_tag)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "VLAN cannot be empty");
        return 0;
}
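
/*
 * Example: a VLAN item whose masked TCI is zero (empty spec, or
 * spec->tci == RTE_BE16(0)) is rejected as "VLAN cannot be empty": Verbs
 * would treat such a match exactly like a pattern without any VLAN layer,
 * so matching VLAN presence requires a non-zero masked TCI.
 */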

/**
 * Validate IPV4 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
                             uint64_t item_flags,
                             const struct rte_flow_item_ipv4 *acc_mask,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *mask = item->mask;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
                                      MLX5_FLOW_LAYER_OUTER_L3;
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;

        if (item_flags & l3m)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L3 layers not supported");
        else if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an L4 layer.");
        if (!mask)
                mask = &rte_flow_item_ipv4_mask;
        else if (mask->hdr.next_proto_id != 0 &&
                 mask->hdr.next_proto_id != 0xff)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_MASK, mask,
                                          "partial mask is not supported"
                                          " for protocol");
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv4),
                                        error);
        if (ret < 0)
                return ret;
        return 0;
}

/**
 * Validate IPV6 item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] acc_mask
 *   Acceptable mask; if NULL, the default internal mask
 *   will be used to check whether item fields are supported.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
                             uint64_t item_flags,
                             const struct rte_flow_item_ipv6 *acc_mask,
                             struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv6 *mask = item->mask;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
                                      MLX5_FLOW_LAYER_OUTER_L3;
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;

        if (item_flags & l3m)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L3 layers not supported");
        else if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 cannot follow an L4 layer.");
        if (!mask)
                mask = &rte_flow_item_ipv6_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        acc_mask ? (const uint8_t *)acc_mask
                                                 : (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_ipv6),
                                        error);
        if (ret < 0)
                return ret;
        return 0;
}

/**
 * Validate UDP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *mask = item->mask;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
                                      MLX5_FLOW_LAYER_OUTER_L3;
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;

        if (target_protocol != 0xff && target_protocol != IPPROTO_UDP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with UDP layer");
        if (!(item_flags & l3m))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 is mandatory to filter on L4");
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L4 layers not supported");
        if (!mask)
                mask = &rte_flow_item_udp_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)&rte_flow_item_udp_mask,
                 sizeof(struct rte_flow_item_udp), error);
        if (ret < 0)
                return ret;
        return 0;
}
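
/*
 * Example of the target_protocol check above: in the pattern
 * ETH / IPV4 (next_proto_id masked to IPPROTO_TCP) / UDP, the L3 item pins
 * the next protocol to TCP, so the UDP item is rejected; only 0xff
 * (wildcard) or IPPROTO_UDP are compatible with a UDP layer.
 */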

/**
 * Validate TCP item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] item_flags
 *   Bit-fields that hold the items detected until now.
 * @param[in] target_protocol
 *   The next protocol in the previous item.
 * @param[in] flow_mask
 *   mlx5 flow-specific (TCF, DV, verbs, etc.) supported header fields mask.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
                            uint64_t item_flags,
                            uint8_t target_protocol,
                            const struct rte_flow_item_tcp *flow_mask,
                            struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *mask = item->mask;
        const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
        const uint64_t l3m = tunnel ? MLX5_FLOW_LAYER_INNER_L3 :
                                      MLX5_FLOW_LAYER_OUTER_L3;
        const uint64_t l4m = tunnel ? MLX5_FLOW_LAYER_INNER_L4 :
                                      MLX5_FLOW_LAYER_OUTER_L4;
        int ret;

        assert(flow_mask);
        if (target_protocol != 0xff && target_protocol != IPPROTO_TCP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "protocol filtering not compatible"
                                          " with TCP layer");
        if (!(item_flags & l3m))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "L3 is mandatory to filter on L4");
        if (item_flags & l4m)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple L4 layers not supported");
        if (!mask)
                mask = &rte_flow_item_tcp_mask;
        ret = mlx5_flow_item_acceptable
                (item, (const uint8_t *)mask,
                 (const uint8_t *)flow_mask,
                 sizeof(struct rte_flow_item_tcp), error);
        if (ret < 0)
                return ret;
        return 0;
}
1373
1374 /**
1375  * Validate VXLAN item.
1376  *
1377  * @param[in] item
1378  *   Item specification.
1379  * @param[in] item_flags
1380  *   Bit-fields that hold the items detected until now.
1383  * @param[out] error
1384  *   Pointer to error structure.
1385  *
1386  * @return
1387  *   0 on success, a negative errno value otherwise and rte_errno is set.
1388  */
1389 int
1390 mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
1391                               uint64_t item_flags,
1392                               struct rte_flow_error *error)
1393 {
1394         const struct rte_flow_item_vxlan *spec = item->spec;
1395         const struct rte_flow_item_vxlan *mask = item->mask;
1396         int ret;
1397         union vni {
1398                 uint32_t vlan_id;
1399                 uint8_t vni[4];
1400         } id = { .vlan_id = 0, };
1401         uint32_t vlan_id = 0;
1402
1404         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1405                 return rte_flow_error_set(error, ENOTSUP,
1406                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1407                                           "multiple tunnel layers not"
1408                                           " supported");
1409         /*
1410          * Verify only UDPv4 is present as defined in
1411          * https://tools.ietf.org/html/rfc7348
1412          */
1413         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1414                 return rte_flow_error_set(error, EINVAL,
1415                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1416                                           "no outer UDP layer found");
1417         if (!mask)
1418                 mask = &rte_flow_item_vxlan_mask;
1419         ret = mlx5_flow_item_acceptable
1420                 (item, (const uint8_t *)mask,
1421                  (const uint8_t *)&rte_flow_item_vxlan_mask,
1422                  sizeof(struct rte_flow_item_vxlan),
1423                  error);
1424         if (ret < 0)
1425                 return ret;
1426         if (spec) {
1427                 memcpy(&id.vni[1], spec->vni, 3);
1428                 vlan_id = id.vlan_id;
1429                 memcpy(&id.vni[1], mask->vni, 3);
1430                 vlan_id &= id.vlan_id;
1431         }
1432         /*
1433          * Tunnel id 0 is equivalent to not adding a VXLAN layer: if
1434          * only this layer is defined in the Verbs specification, it is
1435          * interpreted as a wildcard and all packets will match this
1436          * rule; if it follows a full stack layer (e.g. eth / ipv4 /
1437          * udp), all packets matching the preceding layers will also
1438          * match this rule. To avoid such a situation, VNI 0 is
1439          * currently refused.
1440          */
1441         if (!vlan_id)
1442                 return rte_flow_error_set(error, ENOTSUP,
1443                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1444                                           "VXLAN vni cannot be 0");
1445         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1446                 return rte_flow_error_set(error, ENOTSUP,
1447                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1448                                           "VXLAN tunnel must be fully defined");
1449         return 0;
1450 }
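
/*
 * Illustrative sketch, not part of the driver: the union above maps a
 * 24-bit VXLAN VNI into a 32-bit integer by copying the three VNI bytes
 * into bytes 1..3, leaving byte 0 clear. Only a zero/non-zero test is
 * made on the result, so host byte order does not matter here.
 */
static __rte_unused uint32_t
example_vni_to_u32(const uint8_t vni[3])
{
        union {
                uint32_t val;
                uint8_t bytes[4];
        } v = { .val = 0, };

        memcpy(&v.bytes[1], vni, 3);
        return v.val;
}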
1451
1452 /**
1453  * Validate VXLAN_GPE item.
1454  *
1455  * @param[in] item
1456  *   Item specification.
1457  * @param[in] item_flags
1458  *   Bit-fields that hold the items detected until now.
1459  * @param[in] dev
1460  *   Pointer to the Ethernet device structure.
1463  * @param[out] error
1464  *   Pointer to error structure.
1465  *
1466  * @return
1467  *   0 on success, a negative errno value otherwise and rte_errno is set.
1468  */
1469 int
1470 mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
1471                                   uint64_t item_flags,
1472                                   struct rte_eth_dev *dev,
1473                                   struct rte_flow_error *error)
1474 {
1475         struct priv *priv = dev->data->dev_private;
1476         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
1477         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
1478         int ret;
1479         union vni {
1480                 uint32_t vlan_id;
1481                 uint8_t vni[4];
1482         } id = { .vlan_id = 0, };
1483         uint32_t vlan_id = 0;
1484
1485         if (!priv->config.l3_vxlan_en)
1486                 return rte_flow_error_set(error, ENOTSUP,
1487                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1488                                           "L3 VXLAN is not enabled by device"
1489                                           " parameter and/or not configured in"
1490                                           " firmware");
1491         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1492                 return rte_flow_error_set(error, ENOTSUP,
1493                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1494                                           "multiple tunnel layers not"
1495                                           " supported");
1496         /*
1497          * Verify only UDPv4 is present as defined in
1498          * https://tools.ietf.org/html/rfc7348
1499          */
1500         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_UDP))
1501                 return rte_flow_error_set(error, EINVAL,
1502                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1503                                           "no outer UDP layer found");
1504         if (!mask)
1505                 mask = &rte_flow_item_vxlan_gpe_mask;
1506         ret = mlx5_flow_item_acceptable
1507                 (item, (const uint8_t *)mask,
1508                  (const uint8_t *)&rte_flow_item_vxlan_gpe_mask,
1509                  sizeof(struct rte_flow_item_vxlan_gpe),
1510                  error);
1511         if (ret < 0)
1512                 return ret;
1513         if (spec) {
1514                 if (spec->protocol)
1515                         return rte_flow_error_set(error, ENOTSUP,
1516                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1517                                                   item,
1518                                                   "VXLAN-GPE protocol"
1519                                                   " not supported");
1520                 memcpy(&id.vni[1], spec->vni, 3);
1521                 vlan_id = id.vlan_id;
1522                 memcpy(&id.vni[1], mask->vni, 3);
1523                 vlan_id &= id.vlan_id;
1524         }
1525         /*
1526          * Tunnel id 0 is equivalent to not adding a VXLAN layer: if only this
1527          * layer is defined in the Verbs specification, it is interpreted as a
1528          * wildcard and all packets will match this rule; if it follows a full
1529          * stack layer (e.g. eth / ipv4 / udp), all packets matching the
1530          * preceding layers will also match this rule. To avoid such a
1531          * situation, VNI 0 is currently refused.
1532          */
1533         if (!vlan_id)
1534                 return rte_flow_error_set(error, ENOTSUP,
1535                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1536                                           "VXLAN-GPE vni cannot be 0");
1537         if (!(item_flags & MLX5_FLOW_LAYER_OUTER))
1538                 return rte_flow_error_set(error, ENOTSUP,
1539                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1540                                           "VXLAN-GPE tunnel must be fully"
1541                                           " defined");
1542         return 0;
1543 }
1544
1545 /**
1546  * Validate GRE item.
1547  *
1548  * @param[in] item
1549  *   Item specification.
1550  * @param[in] item_flags
1551  *   Bit flags to mark detected items.
1552  * @param[in] target_protocol
1553  *   The next protocol in the previous item.
1554  * @param[out] error
1555  *   Pointer to error structure.
1556  *
1557  * @return
1558  *   0 on success, a negative errno value otherwise and rte_errno is set.
1559  */
1560 int
1561 mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
1562                             uint64_t item_flags,
1563                             uint8_t target_protocol,
1564                             struct rte_flow_error *error)
1565 {
1566         const struct rte_flow_item_gre *spec __rte_unused = item->spec;
1567         const struct rte_flow_item_gre *mask = item->mask;
1568         int ret;
1569
1570         if (target_protocol != 0xff && target_protocol != IPPROTO_GRE)
1571                 return rte_flow_error_set(error, EINVAL,
1572                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1573                                           "protocol filtering not compatible"
1574                                           " with this GRE layer");
1575         if (item_flags & MLX5_FLOW_LAYER_TUNNEL)
1576                 return rte_flow_error_set(error, ENOTSUP,
1577                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1578                                           "multiple tunnel layers not"
1579                                           " supported");
1580         if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L3))
1581                 return rte_flow_error_set(error, ENOTSUP,
1582                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1583                                           "L3 Layer is missing");
1584         if (!mask)
1585                 mask = &rte_flow_item_gre_mask;
1586         ret = mlx5_flow_item_acceptable
1587                 (item, (const uint8_t *)mask,
1588                  (const uint8_t *)&rte_flow_item_gre_mask,
1589                  sizeof(struct rte_flow_item_gre), error);
1590         if (ret < 0)
1591                 return ret;
1592 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
1593         if (spec && (spec->protocol & mask->protocol))
1594                 return rte_flow_error_set(error, ENOTSUP,
1595                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1596                                           "without MPLS support the"
1597                                           " specification cannot be used for"
1598                                           " filtering");
1599 #endif
1600         return 0;
1601 }
1602
1603 /**
1604  * Validate MPLS item.
1605  *
1606  * @param[in] dev
1607  *   Pointer to the rte_eth_dev structure.
1608  * @param[in] item
1609  *   Item specification.
1610  * @param[in] item_flags
1611  *   Bit-fields that hold the items detected until now.
1612  * @param[in] prev_layer
1613  *   The protocol layer indicated in previous item.
1614  * @param[out] error
1615  *   Pointer to error structure.
1616  *
1617  * @return
1618  *   0 on success, a negative errno value otherwise and rte_errno is set.
1619  */
1620 int
1621 mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev __rte_unused,
1622                              const struct rte_flow_item *item __rte_unused,
1623                              uint64_t item_flags __rte_unused,
1624                              uint64_t prev_layer __rte_unused,
1625                              struct rte_flow_error *error)
1626 {
1627 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1628         const struct rte_flow_item_mpls *mask = item->mask;
1629         struct priv *priv = dev->data->dev_private;
1630         int ret;
1631
1632         if (!priv->config.mpls_en)
1633                 return rte_flow_error_set(error, ENOTSUP,
1634                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1635                                           "MPLS not supported or"
1636                                           " disabled in firmware"
1637                                           " configuration.");
1638         /* MPLS over IP, UDP or GRE is allowed. */
1639         if (!(prev_layer & (MLX5_FLOW_LAYER_OUTER_L3 |
1640                             MLX5_FLOW_LAYER_OUTER_L4_UDP |
1641                             MLX5_FLOW_LAYER_GRE)))
1642                 return rte_flow_error_set(error, EINVAL,
1643                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1644                                           "protocol filtering not compatible"
1645                                           " with MPLS layer");
1646         /* Multi-tunnel isn't allowed but MPLS over GRE is an exception. */
1647         if ((item_flags & MLX5_FLOW_LAYER_TUNNEL) &&
1648             !(item_flags & MLX5_FLOW_LAYER_GRE))
1649                 return rte_flow_error_set(error, ENOTSUP,
1650                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
1651                                           "multiple tunnel layers not"
1652                                           " supported");
1653         if (!mask)
1654                 mask = &rte_flow_item_mpls_mask;
1655         ret = mlx5_flow_item_acceptable
1656                 (item, (const uint8_t *)mask,
1657                  (const uint8_t *)&rte_flow_item_mpls_mask,
1658                  sizeof(struct rte_flow_item_mpls), error);
1659         if (ret < 0)
1660                 return ret;
1661         return 0;
1662 #endif
1663         return rte_flow_error_set(error, ENOTSUP,
1664                                   RTE_FLOW_ERROR_TYPE_ITEM, item,
1665                                   "MPLS is not supported by Verbs, please"
1666                                   " update.");
1667 }
1668
1669 static int
1670 flow_null_validate(struct rte_eth_dev *dev __rte_unused,
1671                    const struct rte_flow_attr *attr __rte_unused,
1672                    const struct rte_flow_item items[] __rte_unused,
1673                    const struct rte_flow_action actions[] __rte_unused,
1674                    struct rte_flow_error *error __rte_unused)
1675 {
1676         rte_errno = ENOTSUP;
1677         return -rte_errno;
1678 }
1679
1680 static struct mlx5_flow *
1681 flow_null_prepare(const struct rte_flow_attr *attr __rte_unused,
1682                   const struct rte_flow_item items[] __rte_unused,
1683                   const struct rte_flow_action actions[] __rte_unused,
1684                   struct rte_flow_error *error __rte_unused)
1685 {
1686         rte_errno = ENOTSUP;
1687         return NULL;
1688 }
1689
1690 static int
1691 flow_null_translate(struct rte_eth_dev *dev __rte_unused,
1692                     struct mlx5_flow *dev_flow __rte_unused,
1693                     const struct rte_flow_attr *attr __rte_unused,
1694                     const struct rte_flow_item items[] __rte_unused,
1695                     const struct rte_flow_action actions[] __rte_unused,
1696                     struct rte_flow_error *error __rte_unused)
1697 {
1698         rte_errno = ENOTSUP;
1699         return -rte_errno;
1700 }
1701
1702 static int
1703 flow_null_apply(struct rte_eth_dev *dev __rte_unused,
1704                 struct rte_flow *flow __rte_unused,
1705                 struct rte_flow_error *error __rte_unused)
1706 {
1707         rte_errno = ENOTSUP;
1708         return -rte_errno;
1709 }
1710
1711 static void
1712 flow_null_remove(struct rte_eth_dev *dev __rte_unused,
1713                  struct rte_flow *flow __rte_unused)
1714 {
1715 }
1716
1717 static void
1718 flow_null_destroy(struct rte_eth_dev *dev __rte_unused,
1719                   struct rte_flow *flow __rte_unused)
1720 {
1721 }
1722
1723 static int
1724 flow_null_query(struct rte_eth_dev *dev __rte_unused,
1725                 struct rte_flow *flow __rte_unused,
1726                 const struct rte_flow_action *actions __rte_unused,
1727                 void *data __rte_unused,
1728                 struct rte_flow_error *error __rte_unused)
1729 {
1730         rte_errno = ENOTSUP;
1731         return -rte_errno;
1732 }
1733
1734 /* Void driver to protect from null pointer reference. */
1735 const struct mlx5_flow_driver_ops mlx5_flow_null_drv_ops = {
1736         .validate = flow_null_validate,
1737         .prepare = flow_null_prepare,
1738         .translate = flow_null_translate,
1739         .apply = flow_null_apply,
1740         .remove = flow_null_remove,
1741         .destroy = flow_null_destroy,
1742         .query = flow_null_query,
1743 };
1744
1745 /**
1746  * Select flow driver type according to flow attributes and device
1747  * configuration.
1748  *
1749  * @param[in] dev
1750  *   Pointer to the dev structure.
1751  * @param[in] attr
1752  *   Pointer to the flow attributes.
1753  *
1754  * @return
1755  *   The selected flow driver type, MLX5_FLOW_TYPE_MAX otherwise.
1756  */
1757 static enum mlx5_flow_drv_type
1758 flow_get_drv_type(struct rte_eth_dev *dev, const struct rte_flow_attr *attr)
1759 {
1760         struct priv *priv = dev->data->dev_private;
1761         enum mlx5_flow_drv_type type = MLX5_FLOW_TYPE_MAX;
1762
1763         if (attr->transfer)
1764                 type = MLX5_FLOW_TYPE_TCF;
1765         else
1766                 type = priv->config.dv_flow_en ? MLX5_FLOW_TYPE_DV :
1767                                                  MLX5_FLOW_TYPE_VERBS;
1768         return type;
1769 }
1770
1771 #define flow_get_drv_ops(type) flow_drv_ops[type]
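
/*
 * Illustrative sketch, not part of the driver: resolving the per-driver
 * callbacks for a given flow. The MLX5_FLOW_TYPE_MIN and MLX5_FLOW_TYPE_MAX
 * slots of flow_drv_ops[] hold mlx5_flow_null_drv_ops, whose callbacks only
 * set rte_errno to ENOTSUP, so an unset driver type cannot cause a null
 * pointer dereference.
 */
static __rte_unused const struct mlx5_flow_driver_ops *
example_resolve_drv_ops(struct rte_eth_dev *dev,
                        const struct rte_flow_attr *attr)
{
        return flow_get_drv_ops(flow_get_drv_type(dev, attr));
}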
1772
1773 /**
1774  * Flow driver validation API. This abstracts calling driver-specific functions.
1775  * The type of flow driver is determined according to flow attributes.
1776  *
1777  * @param[in] dev
1778  *   Pointer to the dev structure.
1779  * @param[in] attr
1780  *   Pointer to the flow attributes.
1781  * @param[in] items
1782  *   Pointer to the list of items.
1783  * @param[in] actions
1784  *   Pointer to the list of actions.
1785  * @param[out] error
1786  *   Pointer to the error structure.
1787  *
1788  * @return
1789  *   0 on success, a negative errno value otherwise and rte_errno is set.
1790  */
1791 static inline int
1792 flow_drv_validate(struct rte_eth_dev *dev,
1793                   const struct rte_flow_attr *attr,
1794                   const struct rte_flow_item items[],
1795                   const struct rte_flow_action actions[],
1796                   struct rte_flow_error *error)
1797 {
1798         const struct mlx5_flow_driver_ops *fops;
1799         enum mlx5_flow_drv_type type = flow_get_drv_type(dev, attr);
1800
1801         fops = flow_get_drv_ops(type);
1802         return fops->validate(dev, attr, items, actions, error);
1803 }
1804
1805 /**
1806  * Flow driver preparation API. This abstracts calling driver-specific
1807  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
1808  * calculates the size of memory required for the device flow, allocates it,
1809  * initializes the device flow and returns the pointer.
1810  *
1811  * @note
1812  *   This function initializes the device flow structure such as dv, tcf or
1813  *   verbs in struct mlx5_flow. However, it is the caller's responsibility to
1814  *   initialize the rest. For example, adding the returned device flow to the
1815  *   flow->dev_flows list and setting the backward reference to the flow must
1816  *   be done outside of this function. The layers field is not filled either.
1817  *
1818  * @param[in] attr
1819  *   Pointer to the flow attributes.
1820  * @param[in] items
1821  *   Pointer to the list of items.
1822  * @param[in] actions
1823  *   Pointer to the list of actions.
1824  * @param[out] error
1825  *   Pointer to the error structure.
1826  *
1827  * @return
1828  *   Pointer to device flow on success, otherwise NULL and rte_errno is set.
1829  */
1830 static inline struct mlx5_flow *
1831 flow_drv_prepare(const struct rte_flow *flow,
1832                  const struct rte_flow_attr *attr,
1833                  const struct rte_flow_item items[],
1834                  const struct rte_flow_action actions[],
1835                  struct rte_flow_error *error)
1836 {
1837         const struct mlx5_flow_driver_ops *fops;
1838         enum mlx5_flow_drv_type type = flow->drv_type;
1839
1840         assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1841         fops = flow_get_drv_ops(type);
1842         return fops->prepare(attr, items, actions, error);
1843 }
1844
1845 /**
1846  * Flow driver translation API. This abstracts calling driver-specific
1847  * functions. Parent flow (rte_flow) should have driver type (drv_type). It
1848  * translates a generic flow into a driver flow. flow_drv_prepare() must
1849  * precede.
1850  *
1851  * @note
1852  *   dev_flow->layers could be filled as a result of parsing during translation
1853  *   if needed by flow_drv_apply(). dev_flow->flow->actions can also be filled
1854  *   if necessary. As a flow can have multiple dev_flows by RSS flow expansion,
1855  *   flow->actions could be overwritten even though all the expanded dev_flows
1856  *   have the same actions.
1857  *
1858  * @param[in] dev
1859  *   Pointer to the rte dev structure.
1860  * @param[in, out] dev_flow
1861  *   Pointer to the mlx5 flow.
1862  * @param[in] attr
1863  *   Pointer to the flow attributes.
1864  * @param[in] items
1865  *   Pointer to the list of items.
1866  * @param[in] actions
1867  *   Pointer to the list of actions.
1868  * @param[out] error
1869  *   Pointer to the error structure.
1870  *
1871  * @return
1872  *   0 on success, a negative errno value otherwise and rte_errno is set.
1873  */
1874 static inline int
1875 flow_drv_translate(struct rte_eth_dev *dev, struct mlx5_flow *dev_flow,
1876                    const struct rte_flow_attr *attr,
1877                    const struct rte_flow_item items[],
1878                    const struct rte_flow_action actions[],
1879                    struct rte_flow_error *error)
1880 {
1881         const struct mlx5_flow_driver_ops *fops;
1882         enum mlx5_flow_drv_type type = dev_flow->flow->drv_type;
1883
1884         assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1885         fops = flow_get_drv_ops(type);
1886         return fops->translate(dev, dev_flow, attr, items, actions, error);
1887 }
1888
1889 /**
1890  * Flow driver apply API. This abstracts calling driver-specific functions.
1891  * Parent flow (rte_flow) should have driver type (drv_type). It applies the
1892  * translated driver flows to the device. flow_drv_translate() must precede.
1893  *
1894  * @param[in] dev
1895  *   Pointer to Ethernet device structure.
1896  * @param[in, out] flow
1897  *   Pointer to flow structure.
1898  * @param[out] error
1899  *   Pointer to error structure.
1900  *
1901  * @return
1902  *   0 on success, a negative errno value otherwise and rte_errno is set.
1903  */
1904 static inline int
1905 flow_drv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1906                struct rte_flow_error *error)
1907 {
1908         const struct mlx5_flow_driver_ops *fops;
1909         enum mlx5_flow_drv_type type = flow->drv_type;
1910
1911         assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1912         fops = flow_get_drv_ops(type);
1913         return fops->apply(dev, flow, error);
1914 }
1915
1916 /**
1917  * Flow driver remove API. This abstracts calling driver-specific functions.
1918  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
1919  * from the device. All the resources of the flow should be freed by calling
1920  * flow_drv_destroy().
1921  *
1922  * @param[in] dev
1923  *   Pointer to Ethernet device.
1924  * @param[in, out] flow
1925  *   Pointer to flow structure.
1926  */
1927 static inline void
1928 flow_drv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1929 {
1930         const struct mlx5_flow_driver_ops *fops;
1931         enum mlx5_flow_drv_type type = flow->drv_type;
1932
1933         assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1934         fops = flow_get_drv_ops(type);
1935         fops->remove(dev, flow);
1936 }
1937
1938 /**
1939  * Flow driver destroy API. This abstracts calling driver-specific functions.
1940  * Parent flow (rte_flow) should have driver type (drv_type). It removes a flow
1941  * from the device and releases the resources of the flow.
1942  *
1943  * @param[in] dev
1944  *   Pointer to Ethernet device.
1945  * @param[in, out] flow
1946  *   Pointer to flow structure.
1947  */
1948 static inline void
1949 flow_drv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1950 {
1951         const struct mlx5_flow_driver_ops *fops;
1952         enum mlx5_flow_drv_type type = flow->drv_type;
1953
1954         assert(type > MLX5_FLOW_TYPE_MIN && type < MLX5_FLOW_TYPE_MAX);
1955         fops = flow_get_drv_ops(type);
1956         fops->destroy(dev, flow);
1957 }
1958
1959 /**
1960  * Validate a flow supported by the NIC.
1961  *
1962  * @see rte_flow_validate()
1963  * @see rte_flow_ops
1964  */
1965 int
1966 mlx5_flow_validate(struct rte_eth_dev *dev,
1967                    const struct rte_flow_attr *attr,
1968                    const struct rte_flow_item items[],
1969                    const struct rte_flow_action actions[],
1970                    struct rte_flow_error *error)
1971 {
1972         int ret;
1973
1974         ret = flow_drv_validate(dev, attr, items, actions, error);
1975         if (ret < 0)
1976                 return ret;
1977         return 0;
1978 }
1979
1980 /**
1981  * Get RSS action from the action list.
1982  *
1983  * @param[in] actions
1984  *   Pointer to the list of actions.
1985  *
1986  * @return
1987  *   Pointer to the RSS action if it exists, NULL otherwise.
1988  */
1989 static const struct rte_flow_action_rss *
1990 flow_get_rss_action(const struct rte_flow_action actions[])
1991 {
1992         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1993                 switch (actions->type) {
1994                 case RTE_FLOW_ACTION_TYPE_RSS:
1995                         return (const struct rte_flow_action_rss *)
1996                                actions->conf;
1997                 default:
1998                         break;
1999                 }
2000         }
2001         return NULL;
2002 }
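
/*
 * Illustrative sketch, not part of the driver: locating the RSS action in
 * an action list. The queue array and action list below are hypothetical
 * values for demonstration only.
 */
static __rte_unused int
example_find_rss_action(void)
{
        static const uint16_t queues[] = { 0, 1 };
        static const struct rte_flow_action_rss rss = {
                .queue_num = RTE_DIM(queues),
                .queue = queues,
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss, },
                { .type = RTE_FLOW_ACTION_TYPE_END, },
        };

        /* Returns the configuration of the first RSS action, &rss here. */
        return flow_get_rss_action(actions) == &rss;
}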
2003
2004 static unsigned int
2005 find_graph_root(const struct rte_flow_item pattern[], uint32_t rss_level)
2006 {
2007         const struct rte_flow_item *item;
2008         unsigned int has_vlan = 0;
2009
2010         for (item = pattern; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
2011                 if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
2012                         has_vlan = 1;
2013                         break;
2014                 }
2015         }
2016         if (has_vlan)
2017                 return rss_level < 2 ? MLX5_EXPANSION_ROOT_ETH_VLAN :
2018                                        MLX5_EXPANSION_ROOT_OUTER_ETH_VLAN;
2019         return rss_level < 2 ? MLX5_EXPANSION_ROOT :
2020                                MLX5_EXPANSION_ROOT_OUTER;
2021 }
2022
2023 /**
2024  * Create a flow and add it to @p list.
2025  *
2026  * @param dev
2027  *   Pointer to Ethernet device.
2028  * @param list
2029  *   Pointer to a TAILQ flow list.
2030  * @param[in] attr
2031  *   Flow rule attributes.
2032  * @param[in] items
2033  *   Pattern specification (list terminated by the END pattern item).
2034  * @param[in] actions
2035  *   Associated actions (list terminated by the END action).
2036  * @param[out] error
2037  *   Perform verbose error reporting if not NULL.
2038  *
2039  * @return
2040  *   A flow on success, NULL otherwise and rte_errno is set.
2041  */
2042 static struct rte_flow *
2043 flow_list_create(struct rte_eth_dev *dev, struct mlx5_flows *list,
2044                  const struct rte_flow_attr *attr,
2045                  const struct rte_flow_item items[],
2046                  const struct rte_flow_action actions[],
2047                  struct rte_flow_error *error)
2048 {
2049         struct rte_flow *flow = NULL;
2050         struct mlx5_flow *dev_flow;
2051         const struct rte_flow_action_rss *rss;
2052         union {
2053                 struct rte_flow_expand_rss buf;
2054                 uint8_t buffer[2048];
2055         } expand_buffer;
2056         struct rte_flow_expand_rss *buf = &expand_buffer.buf;
2057         int ret;
2058         uint32_t i;
2059         uint32_t flow_size;
2060
2061         ret = flow_drv_validate(dev, attr, items, actions, error);
2062         if (ret < 0)
2063                 return NULL;
2064         flow_size = sizeof(struct rte_flow);
2065         rss = flow_get_rss_action(actions);
2066         if (rss)
2067                 flow_size += RTE_ALIGN_CEIL(rss->queue_num * sizeof(uint16_t),
2068                                             sizeof(void *));
2069         else
2070                 flow_size += RTE_ALIGN_CEIL(sizeof(uint16_t), sizeof(void *));
2071         flow = rte_calloc(__func__, 1, flow_size, 0);
        if (!flow) {
                rte_errno = ENOMEM;
                return NULL;
        }
2072         flow->drv_type = flow_get_drv_type(dev, attr);
2073         assert(flow->drv_type > MLX5_FLOW_TYPE_MIN &&
2074                flow->drv_type < MLX5_FLOW_TYPE_MAX);
2075         flow->queue = (void *)(flow + 1);
2076         LIST_INIT(&flow->dev_flows);
2077         if (rss && rss->types) {
2078                 unsigned int graph_root;
2079
2080                 graph_root = find_graph_root(items, rss->level);
2081                 ret = rte_flow_expand_rss(buf, sizeof(expand_buffer.buffer),
2082                                           items, rss->types,
2083                                           mlx5_support_expansion,
2084                                           graph_root);
2085                 assert(ret > 0 &&
2086                        (unsigned int)ret < sizeof(expand_buffer.buffer));
2087         } else {
2088                 buf->entries = 1;
2089                 buf->entry[0].pattern = (void *)(uintptr_t)items;
2090         }
2091         for (i = 0; i < buf->entries; ++i) {
2092                 dev_flow = flow_drv_prepare(flow, attr, buf->entry[i].pattern,
2093                                             actions, error);
2094                 if (!dev_flow)
2095                         goto error;
2096                 dev_flow->flow = flow;
2097                 LIST_INSERT_HEAD(&flow->dev_flows, dev_flow, next);
2098                 ret = flow_drv_translate(dev, dev_flow, attr,
2099                                          buf->entry[i].pattern,
2100                                          actions, error);
2101                 if (ret < 0)
2102                         goto error;
2103         }
2104         if (dev->data->dev_started) {
2105                 ret = flow_drv_apply(dev, flow, error);
2106                 if (ret < 0)
2107                         goto error;
2108         }
2109         TAILQ_INSERT_TAIL(list, flow, next);
2110         flow_rxq_flags_set(dev, flow);
2111         return flow;
2112 error:
2113         ret = rte_errno; /* Save rte_errno before cleanup. */
2114         assert(flow);
2115         flow_drv_destroy(dev, flow);
2116         rte_free(flow);
2117         rte_errno = ret; /* Restore rte_errno. */
2118         return NULL;
2119 }
2120
2121 /**
2122  * Create a flow.
2123  *
2124  * @see rte_flow_create()
2125  * @see rte_flow_ops
2126  */
2127 struct rte_flow *
2128 mlx5_flow_create(struct rte_eth_dev *dev,
2129                  const struct rte_flow_attr *attr,
2130                  const struct rte_flow_item items[],
2131                  const struct rte_flow_action actions[],
2132                  struct rte_flow_error *error)
2133 {
2134         return flow_list_create(dev,
2135                                 &((struct priv *)dev->data->dev_private)->flows,
2136                                 attr, items, actions, error);
2137 }
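
/*
 * Illustrative sketch, not part of the driver: an application-level flow
 * that triggers RSS expansion in flow_list_create(). With .types covering
 * UDP and TCP, the single eth pattern below is expanded into one device
 * flow per matching path of mlx5_support_expansion. Queue numbers are
 * hypothetical.
 */
static __rte_unused struct rte_flow *
example_create_rss_flow(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        static const uint16_t queues[] = { 0, 1, 2, 3 };
        const struct rte_flow_attr attr = { .ingress = 1, };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH, },
                { .type = RTE_FLOW_ITEM_TYPE_END, },
        };
        const struct rte_flow_action_rss rss = {
                .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
                .types = ETH_RSS_UDP | ETH_RSS_TCP,
                .queue_num = RTE_DIM(queues),
                .queue = queues,
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_RSS, .conf = &rss, },
                { .type = RTE_FLOW_ACTION_TYPE_END, },
        };

        return mlx5_flow_create(dev, &attr, pattern, actions, error);
}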
2138
2139 /**
2140  * Destroy a flow in a list.
2141  *
2142  * @param dev
2143  *   Pointer to Ethernet device.
2144  * @param list
2145  *   Pointer to a TAILQ flow list.
2146  * @param[in] flow
2147  *   Flow to destroy.
2148  */
2149 static void
2150 flow_list_destroy(struct rte_eth_dev *dev, struct mlx5_flows *list,
2151                   struct rte_flow *flow)
2152 {
2153         /*
2154          * Update RX queue flags only if port is started, otherwise it is
2155          * already clean.
2156          */
2157         if (dev->data->dev_started)
2158                 flow_rxq_flags_trim(dev, flow);
2159         flow_drv_destroy(dev, flow);
2160         TAILQ_REMOVE(list, flow, next);
2161         rte_free(flow->fdir);
2162         rte_free(flow);
2163 }
2164
2165 /**
2166  * Destroy all flows.
2167  *
2168  * @param dev
2169  *   Pointer to Ethernet device.
2170  * @param list
2171  *   Pointer to a TAILQ flow list.
2172  */
2173 void
2174 mlx5_flow_list_flush(struct rte_eth_dev *dev, struct mlx5_flows *list)
2175 {
2176         while (!TAILQ_EMPTY(list)) {
2177                 struct rte_flow *flow;
2178
2179                 flow = TAILQ_FIRST(list);
2180                 flow_list_destroy(dev, list, flow);
2181         }
2182 }
2183
2184 /**
2185  * Remove all flows.
2186  *
2187  * @param dev
2188  *   Pointer to Ethernet device.
2189  * @param list
2190  *   Pointer to a TAILQ flow list.
2191  */
2192 void
2193 mlx5_flow_stop(struct rte_eth_dev *dev, struct mlx5_flows *list)
2194 {
2195         struct rte_flow *flow;
2196
2197         TAILQ_FOREACH_REVERSE(flow, list, mlx5_flows, next)
2198                 flow_drv_remove(dev, flow);
2199         flow_rxq_flags_clear(dev);
2200 }
2201
2202 /**
2203  * Add all flows.
2204  *
2205  * @param dev
2206  *   Pointer to Ethernet device.
2207  * @param list
2208  *   Pointer to a TAILQ flow list.
2209  *
2210  * @return
2211  *   0 on success, a negative errno value otherwise and rte_errno is set.
2212  */
2213 int
2214 mlx5_flow_start(struct rte_eth_dev *dev, struct mlx5_flows *list)
2215 {
2216         struct rte_flow *flow;
2217         struct rte_flow_error error;
2218         int ret = 0;
2219
2220         TAILQ_FOREACH(flow, list, next) {
2221                 ret = flow_drv_apply(dev, flow, &error);
2222                 if (ret < 0)
2223                         goto error;
2224                 flow_rxq_flags_set(dev, flow);
2225         }
2226         return 0;
2227 error:
2228         ret = rte_errno; /* Save rte_errno before cleanup. */
2229         mlx5_flow_stop(dev, list);
2230         rte_errno = ret; /* Restore rte_errno. */
2231         return -rte_errno;
2232 }
2233
2234 /**
2235  * Verify the flow list is empty.
2236  *
2237  * @param dev
2238  *   Pointer to Ethernet device.
2239  *
2240  * @return
 *   The number of flows not released.
2241  */
2242 int
2243 mlx5_flow_verify(struct rte_eth_dev *dev)
2244 {
2245         struct priv *priv = dev->data->dev_private;
2246         struct rte_flow *flow;
2247         int ret = 0;
2248
2249         TAILQ_FOREACH(flow, &priv->flows, next) {
2250                 DRV_LOG(DEBUG, "port %u flow %p still referenced",
2251                         dev->data->port_id, (void *)flow);
2252                 ++ret;
2253         }
2254         return ret;
2255 }
2256
2257 /**
2258  * Enable a control flow configured from the control plane.
2259  *
2260  * @param dev
2261  *   Pointer to Ethernet device.
2262  * @param eth_spec
2263  *   An Ethernet flow spec to apply.
2264  * @param eth_mask
2265  *   An Ethernet flow mask to apply.
2266  * @param vlan_spec
2267  *   A VLAN flow spec to apply.
2268  * @param vlan_mask
2269  *   A VLAN flow mask to apply.
2270  *
2271  * @return
2272  *   0 on success, a negative errno value otherwise and rte_errno is set.
2273  */
2274 int
2275 mlx5_ctrl_flow_vlan(struct rte_eth_dev *dev,
2276                     struct rte_flow_item_eth *eth_spec,
2277                     struct rte_flow_item_eth *eth_mask,
2278                     struct rte_flow_item_vlan *vlan_spec,
2279                     struct rte_flow_item_vlan *vlan_mask)
2280 {
2281         struct priv *priv = dev->data->dev_private;
2282         const struct rte_flow_attr attr = {
2283                 .ingress = 1,
2284                 .priority = MLX5_FLOW_PRIO_RSVD,
2285         };
2286         struct rte_flow_item items[] = {
2287                 {
2288                         .type = RTE_FLOW_ITEM_TYPE_ETH,
2289                         .spec = eth_spec,
2290                         .last = NULL,
2291                         .mask = eth_mask,
2292                 },
2293                 {
2294                         .type = (vlan_spec) ? RTE_FLOW_ITEM_TYPE_VLAN :
2295                                               RTE_FLOW_ITEM_TYPE_END,
2296                         .spec = vlan_spec,
2297                         .last = NULL,
2298                         .mask = vlan_mask,
2299                 },
2300                 {
2301                         .type = RTE_FLOW_ITEM_TYPE_END,
2302                 },
2303         };
2304         uint16_t queue[priv->reta_idx_n];
2305         struct rte_flow_action_rss action_rss = {
2306                 .func = RTE_ETH_HASH_FUNCTION_DEFAULT,
2307                 .level = 0,
2308                 .types = priv->rss_conf.rss_hf,
2309                 .key_len = priv->rss_conf.rss_key_len,
2310                 .queue_num = priv->reta_idx_n,
2311                 .key = priv->rss_conf.rss_key,
2312                 .queue = queue,
2313         };
2314         struct rte_flow_action actions[] = {
2315                 {
2316                         .type = RTE_FLOW_ACTION_TYPE_RSS,
2317                         .conf = &action_rss,
2318                 },
2319                 {
2320                         .type = RTE_FLOW_ACTION_TYPE_END,
2321                 },
2322         };
2323         struct rte_flow *flow;
2324         struct rte_flow_error error;
2325         unsigned int i;
2326
2327         if (!priv->reta_idx_n || !priv->rxqs_n) {
2328                 rte_errno = EINVAL;
2329                 return -rte_errno;
2330         }
2331         for (i = 0; i != priv->reta_idx_n; ++i)
2332                 queue[i] = (*priv->reta_idx)[i];
2333         flow = flow_list_create(dev, &priv->ctrl_flows,
2334                                 &attr, items, actions, &error);
2335         if (!flow)
2336                 return -rte_errno;
2337         return 0;
2338 }
2339
2340 /**
2341  * Enable a control flow configured from the control plane.
2342  *
2343  * @param dev
2344  *   Pointer to Ethernet device.
2345  * @param eth_spec
2346  *   An Ethernet flow spec to apply.
2347  * @param eth_mask
2348  *   An Ethernet flow mask to apply.
2349  *
2350  * @return
2351  *   0 on success, a negative errno value otherwise and rte_errno is set.
2352  */
2353 int
2354 mlx5_ctrl_flow(struct rte_eth_dev *dev,
2355                struct rte_flow_item_eth *eth_spec,
2356                struct rte_flow_item_eth *eth_mask)
2357 {
2358         return mlx5_ctrl_flow_vlan(dev, eth_spec, eth_mask, NULL, NULL);
2359 }
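
/*
 * Illustrative sketch, not part of the driver: enabling a broadcast control
 * flow, similar to what the port start path does. The same all-ones
 * Ethernet destination address serves as both spec and mask.
 */
static __rte_unused int
example_enable_broadcast(struct rte_eth_dev *dev)
{
        struct rte_flow_item_eth bcast = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
        };

        return mlx5_ctrl_flow(dev, &bcast, &bcast);
}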
2360
2361 /**
2362  * Destroy a flow.
2363  *
2364  * @see rte_flow_destroy()
2365  * @see rte_flow_ops
2366  */
2367 int
2368 mlx5_flow_destroy(struct rte_eth_dev *dev,
2369                   struct rte_flow *flow,
2370                   struct rte_flow_error *error __rte_unused)
2371 {
2372         struct priv *priv = dev->data->dev_private;
2373
2374         flow_list_destroy(dev, &priv->flows, flow);
2375         return 0;
2376 }
2377
2378 /**
2379  * Destroy all flows.
2380  *
2381  * @see rte_flow_flush()
2382  * @see rte_flow_ops
2383  */
2384 int
2385 mlx5_flow_flush(struct rte_eth_dev *dev,
2386                 struct rte_flow_error *error __rte_unused)
2387 {
2388         struct priv *priv = dev->data->dev_private;
2389
2390         mlx5_flow_list_flush(dev, &priv->flows);
2391         return 0;
2392 }
2393
2394 /**
2395  * Isolated mode.
2396  *
2397  * @see rte_flow_isolate()
2398  * @see rte_flow_ops
2399  */
2400 int
2401 mlx5_flow_isolate(struct rte_eth_dev *dev,
2402                   int enable,
2403                   struct rte_flow_error *error)
2404 {
2405         struct priv *priv = dev->data->dev_private;
2406
2407         if (dev->data->dev_started) {
2408                 rte_flow_error_set(error, EBUSY,
2409                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2410                                    NULL,
2411                                    "port must be stopped first");
2412                 return -rte_errno;
2413         }
2414         priv->isolated = !!enable;
2415         if (enable)
2416                 dev->dev_ops = &mlx5_dev_ops_isolate;
2417         else
2418                 dev->dev_ops = &mlx5_dev_ops;
2419         return 0;
2420 }
2421
2422 /**
2423  * Query a flow.
2424  *
2425  * @see rte_flow_query()
2426  * @see rte_flow_ops
2427  */
2428 static int
2429 flow_drv_query(struct rte_eth_dev *dev,
2430                struct rte_flow *flow,
2431                const struct rte_flow_action *actions,
2432                void *data,
2433                struct rte_flow_error *error)
2434 {
2435         const struct mlx5_flow_driver_ops *fops;
2436         enum mlx5_flow_drv_type ftype = flow->drv_type;
2437
2438         assert(ftype > MLX5_FLOW_TYPE_MIN && ftype < MLX5_FLOW_TYPE_MAX);
2439         fops = flow_get_drv_ops(ftype);
2440
2441         return fops->query(dev, flow, actions, data, error);
2442 }
2443
2444 /**
2445  * Query a flow.
2446  *
2447  * @see rte_flow_query()
2448  * @see rte_flow_ops
2449  */
2450 int
2451 mlx5_flow_query(struct rte_eth_dev *dev,
2452                 struct rte_flow *flow,
2453                 const struct rte_flow_action *actions,
2454                 void *data,
2455                 struct rte_flow_error *error)
2456 {
2457         int ret;
2458
2459         ret = flow_drv_query(dev, flow, actions, data, error);
2460         if (ret < 0)
2461                 return ret;
2462         return 0;
2463 }
2464
2465 /**
2466  * Convert a flow director filter to a generic flow.
2467  *
2468  * @param dev
2469  *   Pointer to Ethernet device.
2470  * @param fdir_filter
2471  *   Flow director filter to add.
2472  * @param attributes
2473  *   Generic flow parameters structure.
2474  *
2475  * @return
2476  *   0 on success, a negative errno value otherwise and rte_errno is set.
2477  */
2478 static int
2479 flow_fdir_filter_convert(struct rte_eth_dev *dev,
2480                          const struct rte_eth_fdir_filter *fdir_filter,
2481                          struct mlx5_fdir *attributes)
2482 {
2483         struct priv *priv = dev->data->dev_private;
2484         const struct rte_eth_fdir_input *input = &fdir_filter->input;
2485         const struct rte_eth_fdir_masks *mask =
2486                 &dev->data->dev_conf.fdir_conf.mask;
2487
2488         /* Validate queue number. */
2489         if (fdir_filter->action.rx_queue >= priv->rxqs_n) {
2490                 DRV_LOG(ERR, "port %u invalid queue number %d",
2491                         dev->data->port_id, fdir_filter->action.rx_queue);
2492                 rte_errno = EINVAL;
2493                 return -rte_errno;
2494         }
2495         attributes->attr.ingress = 1;
2496         attributes->items[0] = (struct rte_flow_item) {
2497                 .type = RTE_FLOW_ITEM_TYPE_ETH,
2498                 .spec = &attributes->l2,
2499                 .mask = &attributes->l2_mask,
2500         };
2501         switch (fdir_filter->action.behavior) {
2502         case RTE_ETH_FDIR_ACCEPT:
2503                 attributes->actions[0] = (struct rte_flow_action){
2504                         .type = RTE_FLOW_ACTION_TYPE_QUEUE,
2505                         .conf = &attributes->queue,
2506                 };
2507                 break;
2508         case RTE_ETH_FDIR_REJECT:
2509                 attributes->actions[0] = (struct rte_flow_action){
2510                         .type = RTE_FLOW_ACTION_TYPE_DROP,
2511                 };
2512                 break;
2513         default:
2514                 DRV_LOG(ERR, "port %u invalid behavior %d",
2515                         dev->data->port_id,
2516                         fdir_filter->action.behavior);
2517                 rte_errno = ENOTSUP;
2518                 return -rte_errno;
2519         }
2520         attributes->queue.index = fdir_filter->action.rx_queue;
2521         /* Handle L3. */
2522         switch (fdir_filter->input.flow_type) {
2523         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2524         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2525         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2526                 attributes->l3.ipv4.hdr = (struct ipv4_hdr){
2527                         .src_addr = input->flow.ip4_flow.src_ip,
2528                         .dst_addr = input->flow.ip4_flow.dst_ip,
2529                         .time_to_live = input->flow.ip4_flow.ttl,
2530                         .type_of_service = input->flow.ip4_flow.tos,
2531                 };
2532                 attributes->l3_mask.ipv4.hdr = (struct ipv4_hdr){
2533                         .src_addr = mask->ipv4_mask.src_ip,
2534                         .dst_addr = mask->ipv4_mask.dst_ip,
2535                         .time_to_live = mask->ipv4_mask.ttl,
2536                         .type_of_service = mask->ipv4_mask.tos,
2537                         .next_proto_id = mask->ipv4_mask.proto,
2538                 };
2539                 attributes->items[1] = (struct rte_flow_item){
2540                         .type = RTE_FLOW_ITEM_TYPE_IPV4,
2541                         .spec = &attributes->l3,
2542                         .mask = &attributes->l3_mask,
2543                 };
2544                 break;
2545         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2546         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2547         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2548                 attributes->l3.ipv6.hdr = (struct ipv6_hdr){
2549                         .hop_limits = input->flow.ipv6_flow.hop_limits,
2550                         .proto = input->flow.ipv6_flow.proto,
2551                 };
2552
2553                 memcpy(attributes->l3.ipv6.hdr.src_addr,
2554                        input->flow.ipv6_flow.src_ip,
2555                        RTE_DIM(attributes->l3.ipv6.hdr.src_addr));
2556                 memcpy(attributes->l3.ipv6.hdr.dst_addr,
2557                        input->flow.ipv6_flow.dst_ip,
2558                        RTE_DIM(attributes->l3.ipv6.hdr.dst_addr));
2559                 memcpy(attributes->l3_mask.ipv6.hdr.src_addr,
2560                        mask->ipv6_mask.src_ip,
2561                        RTE_DIM(attributes->l3_mask.ipv6.hdr.src_addr));
2562                 memcpy(attributes->l3_mask.ipv6.hdr.dst_addr,
2563                        mask->ipv6_mask.dst_ip,
2564                        RTE_DIM(attributes->l3_mask.ipv6.hdr.dst_addr));
2565                 attributes->items[1] = (struct rte_flow_item){
2566                         .type = RTE_FLOW_ITEM_TYPE_IPV6,
2567                         .spec = &attributes->l3,
2568                         .mask = &attributes->l3_mask,
2569                 };
2570                 break;
2571         default:
2572                 DRV_LOG(ERR, "port %u invalid flow type %d",
2573                         dev->data->port_id, fdir_filter->input.flow_type);
2574                 rte_errno = ENOTSUP;
2575                 return -rte_errno;
2576         }
2577         /* Handle L4. */
2578         switch (fdir_filter->input.flow_type) {
2579         case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
2580                 attributes->l4.udp.hdr = (struct udp_hdr){
2581                         .src_port = input->flow.udp4_flow.src_port,
2582                         .dst_port = input->flow.udp4_flow.dst_port,
2583                 };
2584                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2585                         .src_port = mask->src_port_mask,
2586                         .dst_port = mask->dst_port_mask,
2587                 };
2588                 attributes->items[2] = (struct rte_flow_item){
2589                         .type = RTE_FLOW_ITEM_TYPE_UDP,
2590                         .spec = &attributes->l4,
2591                         .mask = &attributes->l4_mask,
2592                 };
2593                 break;
2594         case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
2595                 attributes->l4.tcp.hdr = (struct tcp_hdr){
2596                         .src_port = input->flow.tcp4_flow.src_port,
2597                         .dst_port = input->flow.tcp4_flow.dst_port,
2598                 };
2599                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2600                         .src_port = mask->src_port_mask,
2601                         .dst_port = mask->dst_port_mask,
2602                 };
2603                 attributes->items[2] = (struct rte_flow_item){
2604                         .type = RTE_FLOW_ITEM_TYPE_TCP,
2605                         .spec = &attributes->l4,
2606                         .mask = &attributes->l4_mask,
2607                 };
2608                 break;
2609         case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
2610                 attributes->l4.udp.hdr = (struct udp_hdr){
2611                         .src_port = input->flow.udp6_flow.src_port,
2612                         .dst_port = input->flow.udp6_flow.dst_port,
2613                 };
2614                 attributes->l4_mask.udp.hdr = (struct udp_hdr){
2615                         .src_port = mask->src_port_mask,
2616                         .dst_port = mask->dst_port_mask,
2617                 };
2618                 attributes->items[2] = (struct rte_flow_item){
2619                         .type = RTE_FLOW_ITEM_TYPE_UDP,
2620                         .spec = &attributes->l4,
2621                         .mask = &attributes->l4_mask,
2622                 };
2623                 break;
2624         case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
2625                 attributes->l4.tcp.hdr = (struct tcp_hdr){
2626                         .src_port = input->flow.tcp6_flow.src_port,
2627                         .dst_port = input->flow.tcp6_flow.dst_port,
2628                 };
2629                 attributes->l4_mask.tcp.hdr = (struct tcp_hdr){
2630                         .src_port = mask->src_port_mask,
2631                         .dst_port = mask->dst_port_mask,
2632                 };
2633                 attributes->items[2] = (struct rte_flow_item){
2634                         .type = RTE_FLOW_ITEM_TYPE_TCP,
2635                         .spec = &attributes->l4,
2636                         .mask = &attributes->l4_mask,
2637                 };
2638                 break;
2639         case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
2640         case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
2641                 break;
2642         default:
2643                 DRV_LOG(ERR, "port %u invalid flow type %d",
2644                         dev->data->port_id, fdir_filter->input.flow_type);
2645                 rte_errno = ENOTSUP;
2646                 return -rte_errno;
2647         }
2648         return 0;
2649 }
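
/*
 * Illustrative sketch, not part of the driver: a flow director filter that
 * flow_fdir_filter_convert() turns into an eth / ipv4 / udp pattern with a
 * queue action. Addresses, ports and the queue index are hypothetical.
 */
static __rte_unused int
example_fdir_convert(struct rte_eth_dev *dev, struct mlx5_fdir *attributes)
{
        const struct rte_eth_fdir_filter filter = {
                .input = {
                        .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
                        .flow.udp4_flow = {
                                .ip.src_ip = rte_cpu_to_be_32(0xc0a80001),
                                .ip.dst_ip = rte_cpu_to_be_32(0xc0a80002),
                                .src_port = rte_cpu_to_be_16(1024),
                                .dst_port = rte_cpu_to_be_16(4096),
                        },
                },
                .action = {
                        .rx_queue = 0,
                        .behavior = RTE_ETH_FDIR_ACCEPT,
                },
        };

        return flow_fdir_filter_convert(dev, &filter, attributes);
}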
2650
/* Compare one field of two FDIR flows; mlx5_fdir structures are
 * zero-initialized, so padding bytes compare equal. */
2651 #define FLOW_FDIR_CMP(f1, f2, fld) \
2652         memcmp(&(f1)->fld, &(f2)->fld, sizeof((f1)->fld))
2653
2654 /**
2655  * Compare two FDIR flows. If items and actions are identical, the two flows
2656  * are regarded as the same.
2657  *
2660  * @param f1
2661  *   FDIR flow to compare.
2662  * @param f2
2663  *   FDIR flow to compare.
2664  *
2665  * @return
2666  *   Zero on match, 1 otherwise.
2667  */
2668 static int
2669 flow_fdir_cmp(const struct mlx5_fdir *f1, const struct mlx5_fdir *f2)
2670 {
2671         if (FLOW_FDIR_CMP(f1, f2, attr) ||
2672             FLOW_FDIR_CMP(f1, f2, l2) ||
2673             FLOW_FDIR_CMP(f1, f2, l2_mask) ||
2674             FLOW_FDIR_CMP(f1, f2, l3) ||
2675             FLOW_FDIR_CMP(f1, f2, l3_mask) ||
2676             FLOW_FDIR_CMP(f1, f2, l4) ||
2677             FLOW_FDIR_CMP(f1, f2, l4_mask) ||
2678             FLOW_FDIR_CMP(f1, f2, actions[0].type))
2679                 return 1;
2680         if (f1->actions[0].type == RTE_FLOW_ACTION_TYPE_QUEUE &&
2681             FLOW_FDIR_CMP(f1, f2, queue))
2682                 return 1;
2683         return 0;
2684 }
2685
2686 /**
2687  * Search device flow list to find out a matched FDIR flow.
2688  *
2689  * @param dev
2690  *   Pointer to Ethernet device.
2691  * @param fdir_flow
2692  *   FDIR flow to lookup.
2693  *
2694  * @return
2695  *   Pointer of flow if found, NULL otherwise.
2696  */
2697 static struct rte_flow *
2698 flow_fdir_filter_lookup(struct rte_eth_dev *dev, struct mlx5_fdir *fdir_flow)
2699 {
2700         struct priv *priv = dev->data->dev_private;
2701         struct rte_flow *flow = NULL;
2702
2703         assert(fdir_flow);
2704         TAILQ_FOREACH(flow, &priv->flows, next) {
2705                 if (flow->fdir && !flow_fdir_cmp(flow->fdir, fdir_flow)) {
2706                         DRV_LOG(DEBUG, "port %u found FDIR flow %p",
2707                                 dev->data->port_id, (void *)flow);
2708                         break;
2709                 }
2710         }
2711         return flow;
2712 }
2713
2714 /**
2715  * Add new flow director filter and store it in list.
2716  *
2717  * @param dev
2718  *   Pointer to Ethernet device.
2719  * @param fdir_filter
2720  *   Flow director filter to add.
2721  *
2722  * @return
2723  *   0 on success, a negative errno value otherwise and rte_errno is set.
2724  */
2725 static int
2726 flow_fdir_filter_add(struct rte_eth_dev *dev,
2727                      const struct rte_eth_fdir_filter *fdir_filter)
2728 {
2729         struct priv *priv = dev->data->dev_private;
2730         struct mlx5_fdir *fdir_flow;
2731         struct rte_flow *flow;
2732         int ret;
2733
2734         fdir_flow = rte_zmalloc(__func__, sizeof(*fdir_flow), 0);
2735         if (!fdir_flow) {
2736                 rte_errno = ENOMEM;
2737                 return -rte_errno;
2738         }
2739         ret = flow_fdir_filter_convert(dev, fdir_filter, fdir_flow);
2740         if (ret)
2741                 goto error;
2742         flow = flow_fdir_filter_lookup(dev, fdir_flow);
2743         if (flow) {
2744                 rte_errno = EEXIST;
2745                 goto error;
2746         }
2747         flow = flow_list_create(dev, &priv->flows, &fdir_flow->attr,
2748                                 fdir_flow->items, fdir_flow->actions, NULL);
2749         if (!flow)
2750                 goto error;
2751         assert(!flow->fdir);
2752         flow->fdir = fdir_flow;
2753         DRV_LOG(DEBUG, "port %u created FDIR flow %p",
2754                 dev->data->port_id, (void *)flow);
2755         return 0;
2756 error:
2757         rte_free(fdir_flow);
2758         return -rte_errno;
2759 }
2760
2761 /**
2762  * Delete a specific filter.
2763  *
2764  * @param dev
2765  *   Pointer to Ethernet device.
2766  * @param fdir_filter
2767  *   Filter to be deleted.
2768  *
2769  * @return
2770  *   0 on success, a negative errno value otherwise and rte_errno is set.
2771  */
2772 static int
2773 flow_fdir_filter_delete(struct rte_eth_dev *dev,
2774                         const struct rte_eth_fdir_filter *fdir_filter)
2775 {
2776         struct priv *priv = dev->data->dev_private;
2777         struct rte_flow *flow;
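        /* Zero-initialized description, filled in by the conversion below. */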
2778         struct mlx5_fdir fdir_flow = {
2779                 .attr.group = 0,
2780         };
2781         int ret;
2782
2783         ret = flow_fdir_filter_convert(dev, fdir_filter, &fdir_flow);
2784         if (ret)
2785                 return -rte_errno;
2786         flow = flow_fdir_filter_lookup(dev, &fdir_flow);
2787         if (!flow) {
2788                 rte_errno = ENOENT;
2789                 return -rte_errno;
2790         }
2791         flow_list_destroy(dev, &priv->flows, flow);
2792         DRV_LOG(DEBUG, "port %u deleted FDIR flow %p",
2793                 dev->data->port_id, (void *)flow);
2794         return 0;
2795 }
2796
2797 /**
2798  * Update a specific filter by deleting the old entry and adding a new one.
2799  *
2800  * @param dev
2801  *   Pointer to Ethernet device.
2802  * @param fdir_filter
2803  *   Filter to be updated.
2804  *
2805  * @return
2806  *   0 on success, a negative errno value otherwise and rte_errno is set.
2807  */
2808 static int
2809 flow_fdir_filter_update(struct rte_eth_dev *dev,
2810                         const struct rte_eth_fdir_filter *fdir_filter)
2811 {
2812         int ret;
2813
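        /*
         * Not atomic: the old filter is deleted first; if adding the new
         * one then fails, the original filter is lost.
         */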
2814         ret = flow_fdir_filter_delete(dev, fdir_filter);
2815         if (ret)
2816                 return ret;
2817         return flow_fdir_filter_add(dev, fdir_filter);
2818 }
2819
2820 /**
2821  * Flush all filters.
2822  *
2823  * @param dev
2824  *   Pointer to Ethernet device.
2825  */
2826 static void
2827 flow_fdir_filter_flush(struct rte_eth_dev *dev)
2828 {
2829         struct priv *priv = dev->data->dev_private;
2830
2831         mlx5_flow_list_flush(dev, &priv->flows);
2832 }
2833
2834 /**
2835  * Get flow director information.
2836  *
2837  * @param dev
2838  *   Pointer to Ethernet device.
2839  * @param[out] fdir_info
2840  *   Resulting flow director information.
2841  */
2842 static void
2843 flow_fdir_info_get(struct rte_eth_dev *dev, struct rte_eth_fdir_info *fdir_info)
2844 {
2845         struct rte_eth_fdir_masks *mask =
2846                 &dev->data->dev_conf.fdir_conf.mask;
2847
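        /*
         * Only the mode and the mask are meaningful; flexible payload and
         * guaranteed space are not supported, hence the zeroed fields below.
         */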
2848         fdir_info->mode = dev->data->dev_conf.fdir_conf.mode;
2849         rte_memcpy(&fdir_info->mask, mask, sizeof(fdir_info->mask));
2850         fdir_info->guarant_spc = 0;
2851         fdir_info->max_flexpayload = 0;
2852         fdir_info->flow_types_mask[0] = 0;
2853         fdir_info->flex_payload_unit = 0;
2854         fdir_info->max_flex_payload_segment_num = 0;
2855         fdir_info->flex_payload_limit = 0;
2856         memset(&fdir_info->flex_conf, 0, sizeof(fdir_info->flex_conf));
2857 }
2858
2859 /**
2860  * Dispatch flow director operations.
2861  *
2862  * @param dev
2863  *   Pointer to Ethernet device.
2864  * @param filter_op
2865  *   Operation to perform.
2866  * @param arg
2867  *   Pointer to operation-specific structure.
2868  *
2869  * @return
2870  *   0 on success, a negative errno value otherwise and rte_errno is set.
2871  */
2872 static int
2873 flow_fdir_ctrl_func(struct rte_eth_dev *dev, enum rte_filter_op filter_op,
2874                     void *arg)
2875 {
2876         enum rte_fdir_mode fdir_mode =
2877                 dev->data->dev_conf.fdir_conf.mode;
2878
2879         if (filter_op == RTE_ETH_FILTER_NOP)
2880                 return 0;
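        /* Only perfect filtering modes are supported, signature mode is not. */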
2881         if (fdir_mode != RTE_FDIR_MODE_PERFECT &&
2882             fdir_mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
2883                 DRV_LOG(ERR, "port %u flow director mode %d not supported",
2884                         dev->data->port_id, fdir_mode);
2885                 rte_errno = EINVAL;
2886                 return -rte_errno;
2887         }
2888         switch (filter_op) {
2889         case RTE_ETH_FILTER_ADD:
2890                 return flow_fdir_filter_add(dev, arg);
2891         case RTE_ETH_FILTER_UPDATE:
2892                 return flow_fdir_filter_update(dev, arg);
2893         case RTE_ETH_FILTER_DELETE:
2894                 return flow_fdir_filter_delete(dev, arg);
2895         case RTE_ETH_FILTER_FLUSH:
2896                 flow_fdir_filter_flush(dev);
2897                 break;
2898         case RTE_ETH_FILTER_INFO:
2899                 flow_fdir_info_get(dev, arg);
2900                 break;
2901         default:
2902                 DRV_LOG(DEBUG, "port %u unknown operation %u",
2903                         dev->data->port_id, filter_op);
2904                 rte_errno = EINVAL;
2905                 return -rte_errno;
2906         }
2907         return 0;
2908 }
2909
2910 /**
2911  * Manage filter operations.
2912  *
2913  * @param dev
2914  *   Pointer to Ethernet device structure.
2915  * @param filter_type
2916  *   Filter type.
2917  * @param filter_op
2918  *   Operation to perform.
2919  * @param arg
2920  *   Pointer to operation-specific structure.
2921  *
2922  * @return
2923  *   0 on success, a negative errno value otherwise and rte_errno is set.
2924  */
2925 int
2926 mlx5_dev_filter_ctrl(struct rte_eth_dev *dev,
2927                      enum rte_filter_type filter_type,
2928                      enum rte_filter_op filter_op,
2929                      void *arg)
2930 {
2931         switch (filter_type) {
2932         case RTE_ETH_FILTER_GENERIC:
2933                 if (filter_op != RTE_ETH_FILTER_GET) {
2934                         rte_errno = EINVAL;
2935                         return -rte_errno;
2936                 }
2937                 *(const void **)arg = &mlx5_flow_ops;
2938                 return 0;
2939         case RTE_ETH_FILTER_FDIR:
2940                 return flow_fdir_ctrl_func(dev, filter_op, arg);
2941         default:
2942                 DRV_LOG(ERR, "port %u filter type (%d) not supported",
2943                         dev->data->port_id, filter_type);
2944                 rte_errno = ENOTSUP;
2945                 return -rte_errno;
2946         }
2947         return 0;
2948 }
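/*
 * Illustrative usage sketch, not part of the driver: an application reaches
 * the FDIR code above through the generic filter control API, assuming a
 * configured port_id and fdir_conf.mode set to RTE_FDIR_MODE_PERFECT. The
 * field layout follows rte_eth_ctrl.h; all values are arbitrary examples.
 *
 *      struct rte_eth_fdir_filter filter = {
 *              .soft_id = 1,
 *              .input = {
 *                      .flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_UDP,
 *                      .flow.udp4_flow = {
 *                              .ip.dst_ip = rte_cpu_to_be_32(0xc0a80001),
 *                              .dst_port = rte_cpu_to_be_16(4789),
 *                      },
 *              },
 *              .action = {
 *                      .rx_queue = 1,
 *                      .behavior = RTE_ETH_FDIR_ACCEPT,
 *              },
 *      };
 *      int ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
 *                                        RTE_ETH_FILTER_ADD, &filter);
 */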