net/mlx5: support jump action
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
24 #include <rte_flow.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
27 #include <rte_ip.h>
28 #include <rte_gre.h>
29
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_prm.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
36
37 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
38 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
39 #endif
40
/*
 * Summary of the L3/L4 items found in a flow pattern.  Filled lazily by
 * flow_dv_attr_init() so the modify-header converters (set TP/TTL) can pick
 * the matching protocol header.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once flow_dv_attr_init() has run. */
		uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
		uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
		uint32_t tcp:1; /* Pattern contains a TCP item. */
		uint32_t udp:1; /* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags viewed as a single word. */
};
52
53 /**
54  * Initialize flow attributes structure according to flow items' types.
55  *
56  * @param[in] item
57  *   Pointer to item specification.
58  * @param[out] attr
59  *   Pointer to flow attributes structure.
60  */
61 static void
62 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
63 {
64         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
65                 switch (item->type) {
66                 case RTE_FLOW_ITEM_TYPE_IPV4:
67                         attr->ipv4 = 1;
68                         break;
69                 case RTE_FLOW_ITEM_TYPE_IPV6:
70                         attr->ipv6 = 1;
71                         break;
72                 case RTE_FLOW_ITEM_TYPE_UDP:
73                         attr->udp = 1;
74                         break;
75                 case RTE_FLOW_ITEM_TYPE_TCP:
76                         attr->tcp = 1;
77                         break;
78                 default:
79                         break;
80                 }
81         }
82         attr->valid = 1;
83 }
84
/* Describes one header field a modify-header command can touch. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Hardware field id (mlx5_prm.h). */
};
90
/* Modifiable Ethernet header fields: DMAC (bytes 0-5) and SMAC (6-11),
 * each MAC split in two to fit the 4-byte modification commands. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker: size 0 terminates the table. */
};
98
/* Modifiable IPv4 header fields: TTL (offset 8), source address (12)
 * and destination address (16). */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* End marker: size 0 terminates the table. */
};
105
/* Modifiable IPv6 header fields: hop limit (offset 7), then the 128-bit
 * source (offset 8) and destination (offset 24) addresses, each split
 * into four 32-bit segments. */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker: size 0 terminates the table. */
};
118
/* Modifiable UDP header fields: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker: size 0 terminates the table. */
};
124
/* Modifiable TCP header fields: source and destination ports. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0}, /* End marker: size 0 terminates the table. */
};
130
131 /**
132  * Convert modify-header action to DV specification.
133  *
134  * @param[in] item
135  *   Pointer to item specification.
136  * @param[in] field
137  *   Pointer to field modification information.
138  * @param[in,out] resource
139  *   Pointer to the modify-header resource.
140  * @param[in] type
141  *   Type of modification.
142  * @param[out] error
143  *   Pointer to the error structure.
144  *
145  * @return
146  *   0 on success, a negative errno value otherwise and rte_errno is set.
147  */
148 static int
149 flow_dv_convert_modify_action(struct rte_flow_item *item,
150                               struct field_modify_info *field,
151                               struct mlx5_flow_dv_modify_hdr_resource *resource,
152                               uint32_t type,
153                               struct rte_flow_error *error)
154 {
155         uint32_t i = resource->actions_num;
156         struct mlx5_modification_cmd *actions = resource->actions;
157         const uint8_t *spec = item->spec;
158         const uint8_t *mask = item->mask;
159         uint32_t set;
160
161         while (field->size) {
162                 set = 0;
163                 /* Generate modify command for each mask segment. */
164                 memcpy(&set, &mask[field->offset], field->size);
165                 if (set) {
166                         if (i >= MLX5_MODIFY_NUM)
167                                 return rte_flow_error_set(error, EINVAL,
168                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
169                                          "too many items to modify");
170                         actions[i].action_type = type;
171                         actions[i].field = field->id;
172                         actions[i].length = field->size ==
173                                         4 ? 0 : field->size * 8;
174                         rte_memcpy(&actions[i].data[4 - field->size],
175                                    &spec[field->offset], field->size);
176                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
177                         ++i;
178                 }
179                 if (resource->actions_num != i)
180                         resource->actions_num = i;
181                 field++;
182         }
183         if (!resource->actions_num)
184                 return rte_flow_error_set(error, EINVAL,
185                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
186                                           "invalid modification flow item");
187         return 0;
188 }
189
190 /**
191  * Convert modify-header set IPv4 address action to DV specification.
192  *
193  * @param[in,out] resource
194  *   Pointer to the modify-header resource.
195  * @param[in] action
196  *   Pointer to action specification.
197  * @param[out] error
198  *   Pointer to the error structure.
199  *
200  * @return
201  *   0 on success, a negative errno value otherwise and rte_errno is set.
202  */
203 static int
204 flow_dv_convert_action_modify_ipv4
205                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
206                          const struct rte_flow_action *action,
207                          struct rte_flow_error *error)
208 {
209         const struct rte_flow_action_set_ipv4 *conf =
210                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
211         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
212         struct rte_flow_item_ipv4 ipv4;
213         struct rte_flow_item_ipv4 ipv4_mask;
214
215         memset(&ipv4, 0, sizeof(ipv4));
216         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
217         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
218                 ipv4.hdr.src_addr = conf->ipv4_addr;
219                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
220         } else {
221                 ipv4.hdr.dst_addr = conf->ipv4_addr;
222                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
223         }
224         item.spec = &ipv4;
225         item.mask = &ipv4_mask;
226         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
227                                              MLX5_MODIFICATION_TYPE_SET, error);
228 }
229
230 /**
231  * Convert modify-header set IPv6 address action to DV specification.
232  *
233  * @param[in,out] resource
234  *   Pointer to the modify-header resource.
235  * @param[in] action
236  *   Pointer to action specification.
237  * @param[out] error
238  *   Pointer to the error structure.
239  *
240  * @return
241  *   0 on success, a negative errno value otherwise and rte_errno is set.
242  */
243 static int
244 flow_dv_convert_action_modify_ipv6
245                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
246                          const struct rte_flow_action *action,
247                          struct rte_flow_error *error)
248 {
249         const struct rte_flow_action_set_ipv6 *conf =
250                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
251         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
252         struct rte_flow_item_ipv6 ipv6;
253         struct rte_flow_item_ipv6 ipv6_mask;
254
255         memset(&ipv6, 0, sizeof(ipv6));
256         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
257         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
258                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
259                        sizeof(ipv6.hdr.src_addr));
260                 memcpy(&ipv6_mask.hdr.src_addr,
261                        &rte_flow_item_ipv6_mask.hdr.src_addr,
262                        sizeof(ipv6.hdr.src_addr));
263         } else {
264                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
265                        sizeof(ipv6.hdr.dst_addr));
266                 memcpy(&ipv6_mask.hdr.dst_addr,
267                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
268                        sizeof(ipv6.hdr.dst_addr));
269         }
270         item.spec = &ipv6;
271         item.mask = &ipv6_mask;
272         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
273                                              MLX5_MODIFICATION_TYPE_SET, error);
274 }
275
276 /**
277  * Convert modify-header set MAC address action to DV specification.
278  *
279  * @param[in,out] resource
280  *   Pointer to the modify-header resource.
281  * @param[in] action
282  *   Pointer to action specification.
283  * @param[out] error
284  *   Pointer to the error structure.
285  *
286  * @return
287  *   0 on success, a negative errno value otherwise and rte_errno is set.
288  */
289 static int
290 flow_dv_convert_action_modify_mac
291                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
292                          const struct rte_flow_action *action,
293                          struct rte_flow_error *error)
294 {
295         const struct rte_flow_action_set_mac *conf =
296                 (const struct rte_flow_action_set_mac *)(action->conf);
297         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
298         struct rte_flow_item_eth eth;
299         struct rte_flow_item_eth eth_mask;
300
301         memset(&eth, 0, sizeof(eth));
302         memset(&eth_mask, 0, sizeof(eth_mask));
303         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
304                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
305                        sizeof(eth.src.addr_bytes));
306                 memcpy(&eth_mask.src.addr_bytes,
307                        &rte_flow_item_eth_mask.src.addr_bytes,
308                        sizeof(eth_mask.src.addr_bytes));
309         } else {
310                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
311                        sizeof(eth.dst.addr_bytes));
312                 memcpy(&eth_mask.dst.addr_bytes,
313                        &rte_flow_item_eth_mask.dst.addr_bytes,
314                        sizeof(eth_mask.dst.addr_bytes));
315         }
316         item.spec = &eth;
317         item.mask = &eth_mask;
318         return flow_dv_convert_modify_action(&item, modify_eth, resource,
319                                              MLX5_MODIFICATION_TYPE_SET, error);
320 }
321
322 /**
323  * Convert modify-header set TP action to DV specification.
324  *
325  * @param[in,out] resource
326  *   Pointer to the modify-header resource.
327  * @param[in] action
328  *   Pointer to action specification.
329  * @param[in] items
330  *   Pointer to rte_flow_item objects list.
331  * @param[in] attr
332  *   Pointer to flow attributes structure.
333  * @param[out] error
334  *   Pointer to the error structure.
335  *
336  * @return
337  *   0 on success, a negative errno value otherwise and rte_errno is set.
338  */
339 static int
340 flow_dv_convert_action_modify_tp
341                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
342                          const struct rte_flow_action *action,
343                          const struct rte_flow_item *items,
344                          union flow_dv_attr *attr,
345                          struct rte_flow_error *error)
346 {
347         const struct rte_flow_action_set_tp *conf =
348                 (const struct rte_flow_action_set_tp *)(action->conf);
349         struct rte_flow_item item;
350         struct rte_flow_item_udp udp;
351         struct rte_flow_item_udp udp_mask;
352         struct rte_flow_item_tcp tcp;
353         struct rte_flow_item_tcp tcp_mask;
354         struct field_modify_info *field;
355
356         if (!attr->valid)
357                 flow_dv_attr_init(items, attr);
358         if (attr->udp) {
359                 memset(&udp, 0, sizeof(udp));
360                 memset(&udp_mask, 0, sizeof(udp_mask));
361                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
362                         udp.hdr.src_port = conf->port;
363                         udp_mask.hdr.src_port =
364                                         rte_flow_item_udp_mask.hdr.src_port;
365                 } else {
366                         udp.hdr.dst_port = conf->port;
367                         udp_mask.hdr.dst_port =
368                                         rte_flow_item_udp_mask.hdr.dst_port;
369                 }
370                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
371                 item.spec = &udp;
372                 item.mask = &udp_mask;
373                 field = modify_udp;
374         }
375         if (attr->tcp) {
376                 memset(&tcp, 0, sizeof(tcp));
377                 memset(&tcp_mask, 0, sizeof(tcp_mask));
378                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
379                         tcp.hdr.src_port = conf->port;
380                         tcp_mask.hdr.src_port =
381                                         rte_flow_item_tcp_mask.hdr.src_port;
382                 } else {
383                         tcp.hdr.dst_port = conf->port;
384                         tcp_mask.hdr.dst_port =
385                                         rte_flow_item_tcp_mask.hdr.dst_port;
386                 }
387                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
388                 item.spec = &tcp;
389                 item.mask = &tcp_mask;
390                 field = modify_tcp;
391         }
392         return flow_dv_convert_modify_action(&item, field, resource,
393                                              MLX5_MODIFICATION_TYPE_SET, error);
394 }
395
396 /**
397  * Convert modify-header set TTL action to DV specification.
398  *
399  * @param[in,out] resource
400  *   Pointer to the modify-header resource.
401  * @param[in] action
402  *   Pointer to action specification.
403  * @param[in] items
404  *   Pointer to rte_flow_item objects list.
405  * @param[in] attr
406  *   Pointer to flow attributes structure.
407  * @param[out] error
408  *   Pointer to the error structure.
409  *
410  * @return
411  *   0 on success, a negative errno value otherwise and rte_errno is set.
412  */
413 static int
414 flow_dv_convert_action_modify_ttl
415                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
416                          const struct rte_flow_action *action,
417                          const struct rte_flow_item *items,
418                          union flow_dv_attr *attr,
419                          struct rte_flow_error *error)
420 {
421         const struct rte_flow_action_set_ttl *conf =
422                 (const struct rte_flow_action_set_ttl *)(action->conf);
423         struct rte_flow_item item;
424         struct rte_flow_item_ipv4 ipv4;
425         struct rte_flow_item_ipv4 ipv4_mask;
426         struct rte_flow_item_ipv6 ipv6;
427         struct rte_flow_item_ipv6 ipv6_mask;
428         struct field_modify_info *field;
429
430         if (!attr->valid)
431                 flow_dv_attr_init(items, attr);
432         if (attr->ipv4) {
433                 memset(&ipv4, 0, sizeof(ipv4));
434                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
435                 ipv4.hdr.time_to_live = conf->ttl_value;
436                 ipv4_mask.hdr.time_to_live = 0xFF;
437                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
438                 item.spec = &ipv4;
439                 item.mask = &ipv4_mask;
440                 field = modify_ipv4;
441         }
442         if (attr->ipv6) {
443                 memset(&ipv6, 0, sizeof(ipv6));
444                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
445                 ipv6.hdr.hop_limits = conf->ttl_value;
446                 ipv6_mask.hdr.hop_limits = 0xFF;
447                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
448                 item.spec = &ipv6;
449                 item.mask = &ipv6_mask;
450                 field = modify_ipv6;
451         }
452         return flow_dv_convert_modify_action(&item, field, resource,
453                                              MLX5_MODIFICATION_TYPE_SET, error);
454 }
455
456 /**
457  * Convert modify-header decrement TTL action to DV specification.
458  *
459  * @param[in,out] resource
460  *   Pointer to the modify-header resource.
461  * @param[in] action
462  *   Pointer to action specification.
463  * @param[in] items
464  *   Pointer to rte_flow_item objects list.
465  * @param[in] attr
466  *   Pointer to flow attributes structure.
467  * @param[out] error
468  *   Pointer to the error structure.
469  *
470  * @return
471  *   0 on success, a negative errno value otherwise and rte_errno is set.
472  */
473 static int
474 flow_dv_convert_action_modify_dec_ttl
475                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
476                          const struct rte_flow_item *items,
477                          union flow_dv_attr *attr,
478                          struct rte_flow_error *error)
479 {
480         struct rte_flow_item item;
481         struct rte_flow_item_ipv4 ipv4;
482         struct rte_flow_item_ipv4 ipv4_mask;
483         struct rte_flow_item_ipv6 ipv6;
484         struct rte_flow_item_ipv6 ipv6_mask;
485         struct field_modify_info *field;
486
487         if (!attr->valid)
488                 flow_dv_attr_init(items, attr);
489         if (attr->ipv4) {
490                 memset(&ipv4, 0, sizeof(ipv4));
491                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
492                 ipv4.hdr.time_to_live = 0xFF;
493                 ipv4_mask.hdr.time_to_live = 0xFF;
494                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
495                 item.spec = &ipv4;
496                 item.mask = &ipv4_mask;
497                 field = modify_ipv4;
498         }
499         if (attr->ipv6) {
500                 memset(&ipv6, 0, sizeof(ipv6));
501                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
502                 ipv6.hdr.hop_limits = 0xFF;
503                 ipv6_mask.hdr.hop_limits = 0xFF;
504                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
505                 item.spec = &ipv6;
506                 item.mask = &ipv6_mask;
507                 field = modify_ipv6;
508         }
509         return flow_dv_convert_modify_action(&item, field, resource,
510                                              MLX5_MODIFICATION_TYPE_ADD, error);
511 }
512
513 /**
514  * Validate META item.
515  *
516  * @param[in] dev
517  *   Pointer to the rte_eth_dev structure.
518  * @param[in] item
519  *   Item specification.
520  * @param[in] attr
521  *   Attributes of flow that includes this item.
522  * @param[out] error
523  *   Pointer to error structure.
524  *
525  * @return
526  *   0 on success, a negative errno value otherwise and rte_errno is set.
527  */
528 static int
529 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
530                            const struct rte_flow_item *item,
531                            const struct rte_flow_attr *attr,
532                            struct rte_flow_error *error)
533 {
534         const struct rte_flow_item_meta *spec = item->spec;
535         const struct rte_flow_item_meta *mask = item->mask;
536         const struct rte_flow_item_meta nic_mask = {
537                 .data = RTE_BE32(UINT32_MAX)
538         };
539         int ret;
540         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
541
542         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
543                 return rte_flow_error_set(error, EPERM,
544                                           RTE_FLOW_ERROR_TYPE_ITEM,
545                                           NULL,
546                                           "match on metadata offload "
547                                           "configuration is off for this port");
548         if (!spec)
549                 return rte_flow_error_set(error, EINVAL,
550                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
551                                           item->spec,
552                                           "data cannot be empty");
553         if (!spec->data)
554                 return rte_flow_error_set(error, EINVAL,
555                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
556                                           NULL,
557                                           "data cannot be zero");
558         if (!mask)
559                 mask = &rte_flow_item_meta_mask;
560         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
561                                         (const uint8_t *)&nic_mask,
562                                         sizeof(struct rte_flow_item_meta),
563                                         error);
564         if (ret < 0)
565                 return ret;
566         if (attr->ingress)
567                 return rte_flow_error_set(error, ENOTSUP,
568                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
569                                           NULL,
570                                           "pattern not supported for ingress");
571         return 0;
572 }
573
574 /**
575  * Validate count action.
576  *
577  * @param[in] dev
578  *   device otr.
579  * @param[out] error
580  *   Pointer to error structure.
581  *
582  * @return
583  *   0 on success, a negative errno value otherwise and rte_errno is set.
584  */
585 static int
586 flow_dv_validate_action_count(struct rte_eth_dev *dev,
587                               struct rte_flow_error *error)
588 {
589         struct mlx5_priv *priv = dev->data->dev_private;
590
591         if (!priv->config.devx)
592                 goto notsup_err;
593 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
594         return 0;
595 #endif
596 notsup_err:
597         return rte_flow_error_set
598                       (error, ENOTSUP,
599                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
600                        NULL,
601                        "count action not supported");
602 }
603
604 /**
605  * Validate the L2 encap action.
606  *
607  * @param[in] action_flags
608  *   Holds the actions detected until now.
609  * @param[in] action
610  *   Pointer to the encap action.
611  * @param[in] attr
612  *   Pointer to flow attributes
613  * @param[out] error
614  *   Pointer to error structure.
615  *
616  * @return
617  *   0 on success, a negative errno value otherwise and rte_errno is set.
618  */
619 static int
620 flow_dv_validate_action_l2_encap(uint64_t action_flags,
621                                  const struct rte_flow_action *action,
622                                  const struct rte_flow_attr *attr,
623                                  struct rte_flow_error *error)
624 {
625         if (!(action->conf))
626                 return rte_flow_error_set(error, EINVAL,
627                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
628                                           "configuration cannot be null");
629         if (action_flags & MLX5_FLOW_ACTION_DROP)
630                 return rte_flow_error_set(error, EINVAL,
631                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
632                                           "can't drop and encap in same flow");
633         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
634                 return rte_flow_error_set(error, EINVAL,
635                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
636                                           "can only have a single encap or"
637                                           " decap action in a flow");
638         if (attr->ingress)
639                 return rte_flow_error_set(error, ENOTSUP,
640                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
641                                           NULL,
642                                           "encap action not supported for "
643                                           "ingress");
644         return 0;
645 }
646
647 /**
648  * Validate the L2 decap action.
649  *
650  * @param[in] action_flags
651  *   Holds the actions detected until now.
652  * @param[in] attr
653  *   Pointer to flow attributes
654  * @param[out] error
655  *   Pointer to error structure.
656  *
657  * @return
658  *   0 on success, a negative errno value otherwise and rte_errno is set.
659  */
660 static int
661 flow_dv_validate_action_l2_decap(uint64_t action_flags,
662                                  const struct rte_flow_attr *attr,
663                                  struct rte_flow_error *error)
664 {
665         if (action_flags & MLX5_FLOW_ACTION_DROP)
666                 return rte_flow_error_set(error, EINVAL,
667                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
668                                           "can't drop and decap in same flow");
669         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
670                 return rte_flow_error_set(error, EINVAL,
671                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
672                                           "can only have a single encap or"
673                                           " decap action in a flow");
674         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
675                 return rte_flow_error_set(error, EINVAL,
676                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
677                                           "can't have decap action after"
678                                           " modify action");
679         if (attr->egress)
680                 return rte_flow_error_set(error, ENOTSUP,
681                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
682                                           NULL,
683                                           "decap action not supported for "
684                                           "egress");
685         return 0;
686 }
687
688 /**
689  * Validate the raw encap action.
690  *
691  * @param[in] action_flags
692  *   Holds the actions detected until now.
693  * @param[in] action
694  *   Pointer to the encap action.
695  * @param[in] attr
696  *   Pointer to flow attributes
697  * @param[out] error
698  *   Pointer to error structure.
699  *
700  * @return
701  *   0 on success, a negative errno value otherwise and rte_errno is set.
702  */
703 static int
704 flow_dv_validate_action_raw_encap(uint64_t action_flags,
705                                   const struct rte_flow_action *action,
706                                   const struct rte_flow_attr *attr,
707                                   struct rte_flow_error *error)
708 {
709         if (!(action->conf))
710                 return rte_flow_error_set(error, EINVAL,
711                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
712                                           "configuration cannot be null");
713         if (action_flags & MLX5_FLOW_ACTION_DROP)
714                 return rte_flow_error_set(error, EINVAL,
715                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
716                                           "can't drop and encap in same flow");
717         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
718                 return rte_flow_error_set(error, EINVAL,
719                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
720                                           "can only have a single encap"
721                                           " action in a flow");
722         /* encap without preceding decap is not supported for ingress */
723         if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
724                 return rte_flow_error_set(error, ENOTSUP,
725                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
726                                           NULL,
727                                           "encap action not supported for "
728                                           "ingress");
729         return 0;
730 }
731
732 /**
733  * Validate the raw decap action.
734  *
735  * @param[in] action_flags
736  *   Holds the actions detected until now.
737  * @param[in] action
738  *   Pointer to the encap action.
739  * @param[in] attr
740  *   Pointer to flow attributes
741  * @param[out] error
742  *   Pointer to error structure.
743  *
744  * @return
745  *   0 on success, a negative errno value otherwise and rte_errno is set.
746  */
747 static int
748 flow_dv_validate_action_raw_decap(uint64_t action_flags,
749                                   const struct rte_flow_action *action,
750                                   const struct rte_flow_attr *attr,
751                                   struct rte_flow_error *error)
752 {
753         if (action_flags & MLX5_FLOW_ACTION_DROP)
754                 return rte_flow_error_set(error, EINVAL,
755                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
756                                           "can't drop and decap in same flow");
757         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
758                 return rte_flow_error_set(error, EINVAL,
759                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
760                                           "can't have encap action before"
761                                           " decap action");
762         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
763                 return rte_flow_error_set(error, EINVAL,
764                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
765                                           "can only have a single decap"
766                                           " action in a flow");
767         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
768                 return rte_flow_error_set(error, EINVAL,
769                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
770                                           "can't have decap action after"
771                                           " modify action");
772         /* decap action is valid on egress only if it is followed by encap */
773         if (attr->egress) {
774                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
775                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
776                        action++) {
777                 }
778                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
779                         return rte_flow_error_set
780                                         (error, ENOTSUP,
781                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
782                                          NULL, "decap action not supported"
783                                          " for egress");
784         }
785         return 0;
786 }
787
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5dv_dr_ns *ns;

        /*
         * Flag is 1 for a group-0 (root table) flow, 0 otherwise; it is
         * forwarded to the reformat API below and also participates in the
         * cache-lookup key.
         */
        resource->flags = flow->group ? 0 : 1;
        /* Select the DR namespace matching the flow direction. */
        if (flow->ingress)
                ns = priv->rx_ns;
        else
                ns = priv->tx_ns;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->flags == cache_resource->flags &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        /* Cache hit: share the existing verbs action. */
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        /* Pass a NULL buffer when size is zero (pure L2 decap case). */
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (priv->sh->ctx, cache_resource->reformat_type,
                         cache_resource->ft_type, ns, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        /* New entry starts with refcnt == 1 (this flow's reference). */
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}
866
867 /**
868  * Find existing table jump resource or create and register a new one.
869  *
870  * @param dev[in, out]
871  *   Pointer to rte_eth_dev structure.
872  * @param[in, out] resource
873  *   Pointer to jump table resource.
874  * @parm[in, out] dev_flow
875  *   Pointer to the dev_flow.
876  * @param[out] error
877  *   pointer to error structure.
878  *
879  * @return
880  *   0 on success otherwise -errno and errno is set.
881  */
882 static int
883 flow_dv_jump_tbl_resource_register
884                         (struct rte_eth_dev *dev,
885                          struct mlx5_flow_dv_jump_tbl_resource *resource,
886                          struct mlx5_flow *dev_flow,
887                          struct rte_flow_error *error)
888 {
889         struct mlx5_priv *priv = dev->data->dev_private;
890         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
891
892         /* Lookup a matching resource from cache. */
893         LIST_FOREACH(cache_resource, &priv->jump_tbl, next) {
894                 if (resource->tbl == cache_resource->tbl) {
895                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
896                                 (void *)cache_resource,
897                                 rte_atomic32_read(&cache_resource->refcnt));
898                         rte_atomic32_inc(&cache_resource->refcnt);
899                         dev_flow->dv.jump = cache_resource;
900                         return 0;
901                 }
902         }
903         /* Register new jump table resource. */
904         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
905         if (!cache_resource)
906                 return rte_flow_error_set(error, ENOMEM,
907                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
908                                           "cannot allocate resource memory");
909         *cache_resource = *resource;
910         cache_resource->action =
911                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
912                 (resource->tbl->obj);
913         if (!cache_resource->action) {
914                 rte_free(cache_resource);
915                 return rte_flow_error_set(error, ENOMEM,
916                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
917                                           NULL, "cannot create action");
918         }
919         rte_atomic32_init(&cache_resource->refcnt);
920         rte_atomic32_inc(&cache_resource->refcnt);
921         LIST_INSERT_HEAD(&priv->jump_tbl, cache_resource, next);
922         dev_flow->dv.jump = cache_resource;
923         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
924                 (void *)cache_resource,
925                 rte_atomic32_read(&cache_resource->refcnt));
926         return 0;
927 }
928
929 /**
930  * Get the size of specific rte_flow_item_type
931  *
932  * @param[in] item_type
933  *   Tested rte_flow_item_type.
934  *
935  * @return
936  *   sizeof struct item_type, 0 if void or irrelevant.
937  */
938 static size_t
939 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
940 {
941         size_t retval;
942
943         switch (item_type) {
944         case RTE_FLOW_ITEM_TYPE_ETH:
945                 retval = sizeof(struct rte_flow_item_eth);
946                 break;
947         case RTE_FLOW_ITEM_TYPE_VLAN:
948                 retval = sizeof(struct rte_flow_item_vlan);
949                 break;
950         case RTE_FLOW_ITEM_TYPE_IPV4:
951                 retval = sizeof(struct rte_flow_item_ipv4);
952                 break;
953         case RTE_FLOW_ITEM_TYPE_IPV6:
954                 retval = sizeof(struct rte_flow_item_ipv6);
955                 break;
956         case RTE_FLOW_ITEM_TYPE_UDP:
957                 retval = sizeof(struct rte_flow_item_udp);
958                 break;
959         case RTE_FLOW_ITEM_TYPE_TCP:
960                 retval = sizeof(struct rte_flow_item_tcp);
961                 break;
962         case RTE_FLOW_ITEM_TYPE_VXLAN:
963                 retval = sizeof(struct rte_flow_item_vxlan);
964                 break;
965         case RTE_FLOW_ITEM_TYPE_GRE:
966                 retval = sizeof(struct rte_flow_item_gre);
967                 break;
968         case RTE_FLOW_ITEM_TYPE_NVGRE:
969                 retval = sizeof(struct rte_flow_item_nvgre);
970                 break;
971         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
972                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
973                 break;
974         case RTE_FLOW_ITEM_TYPE_MPLS:
975                 retval = sizeof(struct rte_flow_item_mpls);
976                 break;
977         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
978         default:
979                 retval = 0;
980                 break;
981         }
982         return retval;
983 }
984
/*
 * Default header-field values applied by flow_dv_convert_encap_data() when
 * the application leaves the corresponding encap item field as zero.
 */
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
992
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Copies each item's spec into @p buf back to back, and patches header
 * fields the application left as zero (EtherType, VLAN proto, IP version /
 * TTL / next protocol, UDP destination port, tunnel flags) once the next
 * layer is known.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        /*
         * Pointers into buf for headers already copied; used below to
         * back-patch next-protocol/port fields of outer layers.
         */
        struct ether_hdr *eth = NULL;
        struct vlan_hdr *vlan = NULL;
        struct ipv4_hdr *ipv4 = NULL;
        struct ipv6_hdr *ipv6 = NULL;
        struct udp_hdr *udp = NULL;
        struct vxlan_hdr *vxlan = NULL;
        struct vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                /* Bound the total encap header against the buffer size. */
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                /*
                 * NOTE(review): items->spec is assumed non-NULL for non-VOID
                 * items here — presumably guaranteed by earlier validation;
                 * confirm against the validate path.
                 */
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        /* Patch the innermost L2 EtherType if still unset. */
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        /* GPE has no default next protocol — must be set. */
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct gre_hdr *)&buf[temp_size];
                        /* GRE has no default next protocol — must be set. */
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                        break;
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}
1162
1163 /**
1164  * Convert L2 encap action to DV specification.
1165  *
1166  * @param[in] dev
1167  *   Pointer to rte_eth_dev structure.
1168  * @param[in] action
1169  *   Pointer to action structure.
1170  * @param[in, out] dev_flow
1171  *   Pointer to the mlx5_flow.
1172  * @param[out] error
1173  *   Pointer to the error structure.
1174  *
1175  * @return
1176  *   0 on success, a negative errno value otherwise and rte_errno is set.
1177  */
1178 static int
1179 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1180                                const struct rte_flow_action *action,
1181                                struct mlx5_flow *dev_flow,
1182                                struct rte_flow_error *error)
1183 {
1184         const struct rte_flow_item *encap_data;
1185         const struct rte_flow_action_raw_encap *raw_encap_data;
1186         struct mlx5_flow_dv_encap_decap_resource res = {
1187                 .reformat_type =
1188                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1189                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1190         };
1191
1192         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1193                 raw_encap_data =
1194                         (const struct rte_flow_action_raw_encap *)action->conf;
1195                 res.size = raw_encap_data->size;
1196                 memcpy(res.buf, raw_encap_data->data, res.size);
1197         } else {
1198                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1199                         encap_data =
1200                                 ((const struct rte_flow_action_vxlan_encap *)
1201                                                 action->conf)->definition;
1202                 else
1203                         encap_data =
1204                                 ((const struct rte_flow_action_nvgre_encap *)
1205                                                 action->conf)->definition;
1206                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1207                                                &res.size, error))
1208                         return -rte_errno;
1209         }
1210         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1211                 return rte_flow_error_set(error, EINVAL,
1212                                           RTE_FLOW_ERROR_TYPE_ACTION,
1213                                           NULL, "can't create L2 encap action");
1214         return 0;
1215 }
1216
1217 /**
1218  * Convert L2 decap action to DV specification.
1219  *
1220  * @param[in] dev
1221  *   Pointer to rte_eth_dev structure.
1222  * @param[in, out] dev_flow
1223  *   Pointer to the mlx5_flow.
1224  * @param[out] error
1225  *   Pointer to the error structure.
1226  *
1227  * @return
1228  *   0 on success, a negative errno value otherwise and rte_errno is set.
1229  */
1230 static int
1231 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1232                                struct mlx5_flow *dev_flow,
1233                                struct rte_flow_error *error)
1234 {
1235         struct mlx5_flow_dv_encap_decap_resource res = {
1236                 .size = 0,
1237                 .reformat_type =
1238                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1239                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1240         };
1241
1242         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1243                 return rte_flow_error_set(error, EINVAL,
1244                                           RTE_FLOW_ERROR_TYPE_ACTION,
1245                                           NULL, "can't create L2 decap action");
1246         return 0;
1247 }
1248
1249 /**
1250  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1251  *
1252  * @param[in] dev
1253  *   Pointer to rte_eth_dev structure.
1254  * @param[in] action
1255  *   Pointer to action structure.
1256  * @param[in, out] dev_flow
1257  *   Pointer to the mlx5_flow.
1258  * @param[in] attr
1259  *   Pointer to the flow attributes.
1260  * @param[out] error
1261  *   Pointer to the error structure.
1262  *
1263  * @return
1264  *   0 on success, a negative errno value otherwise and rte_errno is set.
1265  */
1266 static int
1267 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1268                                 const struct rte_flow_action *action,
1269                                 struct mlx5_flow *dev_flow,
1270                                 const struct rte_flow_attr *attr,
1271                                 struct rte_flow_error *error)
1272 {
1273         const struct rte_flow_action_raw_encap *encap_data;
1274         struct mlx5_flow_dv_encap_decap_resource res;
1275
1276         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1277         res.size = encap_data->size;
1278         memcpy(res.buf, encap_data->data, res.size);
1279         res.reformat_type = attr->egress ?
1280                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1281                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1282         res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1283                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1285                 return rte_flow_error_set(error, EINVAL,
1286                                           RTE_FLOW_ERROR_TYPE_ACTION,
1287                                           NULL, "can't create encap action");
1288         return 0;
1289 }
1290
1291 /**
1292  * Validate the modify-header actions.
1293  *
1294  * @param[in] action_flags
1295  *   Holds the actions detected until now.
1296  * @param[in] action
1297  *   Pointer to the modify action.
1298  * @param[out] error
1299  *   Pointer to error structure.
1300  *
1301  * @return
1302  *   0 on success, a negative errno value otherwise and rte_errno is set.
1303  */
1304 static int
1305 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1306                                    const struct rte_flow_action *action,
1307                                    struct rte_flow_error *error)
1308 {
1309         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1310                 return rte_flow_error_set(error, EINVAL,
1311                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1312                                           NULL, "action configuration not set");
1313         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1314                 return rte_flow_error_set(error, EINVAL,
1315                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1316                                           "can't have encap action before"
1317                                           " modify action");
1318         return 0;
1319 }
1320
1321 /**
1322  * Validate the modify-header MAC address actions.
1323  *
1324  * @param[in] action_flags
1325  *   Holds the actions detected until now.
1326  * @param[in] action
1327  *   Pointer to the modify action.
1328  * @param[in] item_flags
1329  *   Holds the items detected.
1330  * @param[out] error
1331  *   Pointer to error structure.
1332  *
1333  * @return
1334  *   0 on success, a negative errno value otherwise and rte_errno is set.
1335  */
1336 static int
1337 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1338                                    const struct rte_flow_action *action,
1339                                    const uint64_t item_flags,
1340                                    struct rte_flow_error *error)
1341 {
1342         int ret = 0;
1343
1344         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1345         if (!ret) {
1346                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1347                         return rte_flow_error_set(error, EINVAL,
1348                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1349                                                   NULL,
1350                                                   "no L2 item in pattern");
1351         }
1352         return ret;
1353 }
1354
1355 /**
1356  * Validate the modify-header IPv4 address actions.
1357  *
1358  * @param[in] action_flags
1359  *   Holds the actions detected until now.
1360  * @param[in] action
1361  *   Pointer to the modify action.
1362  * @param[in] item_flags
1363  *   Holds the items detected.
1364  * @param[out] error
1365  *   Pointer to error structure.
1366  *
1367  * @return
1368  *   0 on success, a negative errno value otherwise and rte_errno is set.
1369  */
1370 static int
1371 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1372                                     const struct rte_flow_action *action,
1373                                     const uint64_t item_flags,
1374                                     struct rte_flow_error *error)
1375 {
1376         int ret = 0;
1377
1378         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1379         if (!ret) {
1380                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1381                         return rte_flow_error_set(error, EINVAL,
1382                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1383                                                   NULL,
1384                                                   "no ipv4 item in pattern");
1385         }
1386         return ret;
1387 }
1388
1389 /**
1390  * Validate the modify-header IPv6 address actions.
1391  *
1392  * @param[in] action_flags
1393  *   Holds the actions detected until now.
1394  * @param[in] action
1395  *   Pointer to the modify action.
1396  * @param[in] item_flags
1397  *   Holds the items detected.
1398  * @param[out] error
1399  *   Pointer to error structure.
1400  *
1401  * @return
1402  *   0 on success, a negative errno value otherwise and rte_errno is set.
1403  */
1404 static int
1405 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1406                                     const struct rte_flow_action *action,
1407                                     const uint64_t item_flags,
1408                                     struct rte_flow_error *error)
1409 {
1410         int ret = 0;
1411
1412         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1413         if (!ret) {
1414                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1415                         return rte_flow_error_set(error, EINVAL,
1416                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1417                                                   NULL,
1418                                                   "no ipv6 item in pattern");
1419         }
1420         return ret;
1421 }
1422
1423 /**
1424  * Validate the modify-header TP actions.
1425  *
1426  * @param[in] action_flags
1427  *   Holds the actions detected until now.
1428  * @param[in] action
1429  *   Pointer to the modify action.
1430  * @param[in] item_flags
1431  *   Holds the items detected.
1432  * @param[out] error
1433  *   Pointer to error structure.
1434  *
1435  * @return
1436  *   0 on success, a negative errno value otherwise and rte_errno is set.
1437  */
1438 static int
1439 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1440                                   const struct rte_flow_action *action,
1441                                   const uint64_t item_flags,
1442                                   struct rte_flow_error *error)
1443 {
1444         int ret = 0;
1445
1446         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1447         if (!ret) {
1448                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1449                         return rte_flow_error_set(error, EINVAL,
1450                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1451                                                   NULL, "no transport layer "
1452                                                   "in pattern");
1453         }
1454         return ret;
1455 }
1456
1457 /**
1458  * Validate the modify-header TTL actions.
1459  *
1460  * @param[in] action_flags
1461  *   Holds the actions detected until now.
1462  * @param[in] action
1463  *   Pointer to the modify action.
1464  * @param[in] item_flags
1465  *   Holds the items detected.
1466  * @param[out] error
1467  *   Pointer to error structure.
1468  *
1469  * @return
1470  *   0 on success, a negative errno value otherwise and rte_errno is set.
1471  */
1472 static int
1473 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1474                                    const struct rte_flow_action *action,
1475                                    const uint64_t item_flags,
1476                                    struct rte_flow_error *error)
1477 {
1478         int ret = 0;
1479
1480         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1481         if (!ret) {
1482                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1483                         return rte_flow_error_set(error, EINVAL,
1484                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1485                                                   NULL,
1486                                                   "no IP protocol in pattern");
1487         }
1488         return ret;
1489 }
1490
1491 /**
1492  * Validate jump action.
1493  *
1494  * @param[in] action
1495  *   Pointer to the modify action.
1496  * @param[in] group
1497  *   The group of the current flow.
1498  * @param[out] error
1499  *   Pointer to error structure.
1500  *
1501  * @return
1502  *   0 on success, a negative errno value otherwise and rte_errno is set.
1503  */
1504 static int
1505 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1506                              uint32_t group,
1507                              struct rte_flow_error *error)
1508 {
1509         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1510                 return rte_flow_error_set(error, EINVAL,
1511                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1512                                           NULL, "action configuration not set");
1513         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1514                 return rte_flow_error_set(error, EINVAL,
1515                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1516                                           "target group must be higher then"
1517                                           " the current flow group");
1518         return 0;
1519 }
1520
1521
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Identical modify-header command arrays are de-duplicated through the
 * per-port cache (priv->modify_cmds) using reference counting, so flows
 * with the same header rewrites share one verbs action object.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;

	/* Tx table type selects the Tx namespace, anything else uses Rx. */
	struct mlx5dv_dr_ns *ns =
		resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX  ?
		priv->tx_ns : priv->rx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->modify_cmds, next) {
		/* Match on table type, command count and raw command bytes. */
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Cache hit: share the existing verbs action. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the device action from the cached copy of the commands. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(priv->sh->ctx, cache_resource->ft_type,
					 ns, 0,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts at refcnt 1, owned by this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1596
1597 /**
1598  * Get or create a flow counter.
1599  *
1600  * @param[in] dev
1601  *   Pointer to the Ethernet device structure.
1602  * @param[in] shared
1603  *   Indicate if this counter is shared with other flows.
1604  * @param[in] id
1605  *   Counter identifier.
1606  *
1607  * @return
1608  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
1609  */
1610 static struct mlx5_flow_counter *
1611 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1612 {
1613         struct mlx5_priv *priv = dev->data->dev_private;
1614         struct mlx5_flow_counter *cnt = NULL;
1615         struct mlx5_devx_counter_set *dcs = NULL;
1616         int ret;
1617
1618         if (!priv->config.devx) {
1619                 ret = -ENOTSUP;
1620                 goto error_exit;
1621         }
1622         if (shared) {
1623                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1624                         if (cnt->shared && cnt->id == id) {
1625                                 cnt->ref_cnt++;
1626                                 return cnt;
1627                         }
1628                 }
1629         }
1630         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1631         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1632         if (!dcs || !cnt) {
1633                 ret = -ENOMEM;
1634                 goto error_exit;
1635         }
1636         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1637         if (ret)
1638                 goto error_exit;
1639         struct mlx5_flow_counter tmpl = {
1640                 .shared = shared,
1641                 .ref_cnt = 1,
1642                 .id = id,
1643                 .dcs = dcs,
1644         };
1645         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1646         if (!tmpl.action) {
1647                 ret = errno;
1648                 goto error_exit;
1649         }
1650         *cnt = tmpl;
1651         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1652         return cnt;
1653 error_exit:
1654         rte_free(cnt);
1655         rte_free(dcs);
1656         rte_errno = -ret;
1657         return NULL;
1658 }
1659
1660 /**
1661  * Release a flow counter.
1662  *
1663  * @param[in] counter
1664  *   Pointer to the counter handler.
1665  */
1666 static void
1667 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1668 {
1669         int ret;
1670
1671         if (!counter)
1672                 return;
1673         if (--counter->ref_cnt == 0) {
1674                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1675                 if (ret)
1676                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1677                 LIST_REMOVE(counter, next);
1678                 rte_free(counter->dcs);
1679                 rte_free(counter);
1680         }
1681 }
1682
1683 /**
1684  * Verify the @p attributes will be correctly understood by the NIC and store
1685  * them in the @p flow if everything is correct.
1686  *
1687  * @param[in] dev
1688  *   Pointer to dev struct.
1689  * @param[in] attributes
1690  *   Pointer to flow attributes
1691  * @param[out] error
1692  *   Pointer to error structure.
1693  *
1694  * @return
1695  *   0 on success, a negative errno value otherwise and rte_errno is set.
1696  */
1697 static int
1698 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1699                             const struct rte_flow_attr *attributes,
1700                             struct rte_flow_error *error)
1701 {
1702         struct mlx5_priv *priv = dev->data->dev_private;
1703         uint32_t priority_max = priv->config.flow_prio - 1;
1704
1705 #ifndef HAVE_MLX5DV_DR
1706         if (attributes->group)
1707                 return rte_flow_error_set(error, ENOTSUP,
1708                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1709                                           NULL,
1710                                           "groups is not supported");
1711 #endif
1712         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1713             attributes->priority >= priority_max)
1714                 return rte_flow_error_set(error, ENOTSUP,
1715                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1716                                           NULL,
1717                                           "priority out of range");
1718         if (attributes->transfer)
1719                 return rte_flow_error_set(error, ENOTSUP,
1720                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1721                                           NULL,
1722                                           "transfer is not supported");
1723         if (!(attributes->egress ^ attributes->ingress))
1724                 return rte_flow_error_set(error, ENOTSUP,
1725                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1726                                           "must specify exactly one of "
1727                                           "ingress or egress");
1728         return 0;
1729 }
1730
/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;	/* MLX5_FLOW_ACTION_* bits seen. */
	uint64_t item_flags = 0;	/* MLX5_FLOW_LAYER_* bits seen. */
	uint64_t last_item = 0;		/* Layer bit of the previous item. */
	int tunnel = 0;			/* Non-zero once inside a tunnel. */
	uint8_t next_protocol = 0xff;	/* Last L3 next-header, 0xff = any. */
	int actions_n = 0;		/* Number of device actions so far. */

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	/*
	 * First pass: validate each pattern item against the layers seen
	 * before it and accumulate its layer flag into item_flags.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		/* Items after a tunnel item describe the inner packet. */
		tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/*
			 * Remember the masked next-protocol so the L4/tunnel
			 * items that follow can be checked against it.
			 */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			/* Same next-protocol tracking as for IPv4. */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &rte_flow_item_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		item_flags |= last_item;
	}
	/*
	 * Second pass: validate the actions against the matched layers and
	 * the actions seen before. All modify-header actions together count
	 * as a single device action (see the actions_n handling below).
	 */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;

		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			ret = flow_dv_validate_action_jump(actions,
							   attr->group, error);
			if (ret)
				return ret;
			++actions_n;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/* An ingress flow must carry at least one fate action. */
	if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, actions,
					  "no fate action is found");
	return 0;
}
2098
2099 /**
2100  * Internal preparation function. Allocates the DV flow size,
2101  * this size is constant.
2102  *
2103  * @param[in] attr
2104  *   Pointer to the flow attributes.
2105  * @param[in] items
2106  *   Pointer to the list of items.
2107  * @param[in] actions
2108  *   Pointer to the list of actions.
2109  * @param[out] error
2110  *   Pointer to the error structure.
2111  *
2112  * @return
2113  *   Pointer to mlx5_flow object on success,
2114  *   otherwise NULL and rte_errno is set.
2115  */
2116 static struct mlx5_flow *
2117 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2118                 const struct rte_flow_item items[] __rte_unused,
2119                 const struct rte_flow_action actions[] __rte_unused,
2120                 struct rte_flow_error *error)
2121 {
2122         uint32_t size = sizeof(struct mlx5_flow);
2123         struct mlx5_flow *flow;
2124
2125         flow = rte_calloc(__func__, 1, size, 0);
2126         if (!flow) {
2127                 rte_flow_error_set(error, ENOMEM,
2128                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2129                                    "not enough memory to create flow");
2130                 return NULL;
2131         }
2132         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2133         return flow;
2134 }
2135
2136 #ifndef NDEBUG
2137 /**
2138  * Sanity check for match mask and value. Similar to check_valid_spec() in
2139  * kernel driver. If unmasked bit is present in value, it returns failure.
2140  *
2141  * @param match_mask
2142  *   pointer to match mask buffer.
2143  * @param match_value
2144  *   pointer to match value buffer.
2145  *
2146  * @return
2147  *   0 if valid, -EINVAL otherwise.
2148  */
2149 static int
2150 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2151 {
2152         uint8_t *m = match_mask;
2153         uint8_t *v = match_value;
2154         unsigned int i;
2155
2156         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2157                 if (v[i] & ~m[i]) {
2158                         DRV_LOG(ERR,
2159                                 "match_value differs from match_criteria"
2160                                 " %p[%u] != %p[%u]",
2161                                 match_value, i, match_mask, i);
2162                         return -EINVAL;
2163                 }
2164         }
2165         return 0;
2166 }
2167 #endif
2168
2169 /**
2170  * Add Ethernet item to matcher and to the value.
2171  *
2172  * @param[in, out] matcher
2173  *   Flow matcher.
2174  * @param[in, out] key
2175  *   Flow matcher value.
2176  * @param[in] item
2177  *   Flow pattern to translate.
2178  * @param[in] inner
2179  *   Item is inner pattern.
2180  */
2181 static void
2182 flow_dv_translate_item_eth(void *matcher, void *key,
2183                            const struct rte_flow_item *item, int inner)
2184 {
2185         const struct rte_flow_item_eth *eth_m = item->mask;
2186         const struct rte_flow_item_eth *eth_v = item->spec;
2187         const struct rte_flow_item_eth nic_mask = {
2188                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2189                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2190                 .type = RTE_BE16(0xffff),
2191         };
2192         void *headers_m;
2193         void *headers_v;
2194         char *l24_v;
2195         unsigned int i;
2196
2197         if (!eth_v)
2198                 return;
2199         if (!eth_m)
2200                 eth_m = &nic_mask;
2201         if (inner) {
2202                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2203                                          inner_headers);
2204                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2205         } else {
2206                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2207                                          outer_headers);
2208                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2209         }
2210         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2211                &eth_m->dst, sizeof(eth_m->dst));
2212         /* The value must be in the range of the mask. */
2213         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2214         for (i = 0; i < sizeof(eth_m->dst); ++i)
2215                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2216         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2217                &eth_m->src, sizeof(eth_m->src));
2218         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2219         /* The value must be in the range of the mask. */
2220         for (i = 0; i < sizeof(eth_m->dst); ++i)
2221                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2222         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2223                  rte_be_to_cpu_16(eth_m->type));
2224         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2225         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2226 }
2227
2228 /**
2229  * Add VLAN item to matcher and to the value.
2230  *
2231  * @param[in, out] matcher
2232  *   Flow matcher.
2233  * @param[in, out] key
2234  *   Flow matcher value.
2235  * @param[in] item
2236  *   Flow pattern to translate.
2237  * @param[in] inner
2238  *   Item is inner pattern.
2239  */
2240 static void
2241 flow_dv_translate_item_vlan(void *matcher, void *key,
2242                             const struct rte_flow_item *item,
2243                             int inner)
2244 {
2245         const struct rte_flow_item_vlan *vlan_m = item->mask;
2246         const struct rte_flow_item_vlan *vlan_v = item->spec;
2247         const struct rte_flow_item_vlan nic_mask = {
2248                 .tci = RTE_BE16(0x0fff),
2249                 .inner_type = RTE_BE16(0xffff),
2250         };
2251         void *headers_m;
2252         void *headers_v;
2253         uint16_t tci_m;
2254         uint16_t tci_v;
2255
2256         if (!vlan_v)
2257                 return;
2258         if (!vlan_m)
2259                 vlan_m = &nic_mask;
2260         if (inner) {
2261                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2262                                          inner_headers);
2263                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2264         } else {
2265                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2266                                          outer_headers);
2267                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2268         }
2269         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2270         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2271         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2272         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2273         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2274         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2275         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2276         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2277         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2278         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2279 }
2280
2281 /**
2282  * Add IPV4 item to matcher and to the value.
2283  *
2284  * @param[in, out] matcher
2285  *   Flow matcher.
2286  * @param[in, out] key
2287  *   Flow matcher value.
2288  * @param[in] item
2289  *   Flow pattern to translate.
2290  * @param[in] inner
2291  *   Item is inner pattern.
2292  * @param[in] group
2293  *   The group to insert the rule.
2294  */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask applied when the item carries none. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	/* Select the inner or outer header set of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Mask for ip_version: full 4-bit mask on group 0 (root table),
	 * 0x4 on other groups. NOTE(review): presumably a firmware
	 * requirement for non-root tables — confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* No spec: only the IP version match above applies. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address; the value must be in the range of the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte is split into ECN (low 2 bits) and DSCP (high 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	/* Next protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
2358
2359 /**
2360  * Add IPV6 item to matcher and to the value.
2361  *
2362  * @param[in, out] matcher
2363  *   Flow matcher.
2364  * @param[in, out] key
2365  *   Flow matcher value.
2366  * @param[in] item
2367  *   Flow pattern to translate.
2368  * @param[in] inner
2369  *   Item is inner pattern.
2370  * @param[in] group
2371  *   The group to insert the rule.
2372  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask applied when the item carries none. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	/* Select the inner or outer header set of the match parameter. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Mask for ip_version: full 4-bit mask on group 0 (root table),
	 * 0x6 on other groups — mirrors the IPv4 translation above.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* No spec: only the IP version match above applies. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	/* Destination address; the value must be in the range of the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* vtc_flow layout: version(4) | traffic class(8) | flow label(20). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
2462
2463 /**
2464  * Add TCP item to matcher and to the value.
2465  *
2466  * @param[in, out] matcher
2467  *   Flow matcher.
2468  * @param[in, out] key
2469  *   Flow matcher value.
2470  * @param[in] item
2471  *   Flow pattern to translate.
2472  * @param[in] inner
2473  *   Item is inner pattern.
2474  */
2475 static void
2476 flow_dv_translate_item_tcp(void *matcher, void *key,
2477                            const struct rte_flow_item *item,
2478                            int inner)
2479 {
2480         const struct rte_flow_item_tcp *tcp_m = item->mask;
2481         const struct rte_flow_item_tcp *tcp_v = item->spec;
2482         void *headers_m;
2483         void *headers_v;
2484
2485         if (inner) {
2486                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2487                                          inner_headers);
2488                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2489         } else {
2490                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2491                                          outer_headers);
2492                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2493         }
2494         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2495         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2496         if (!tcp_v)
2497                 return;
2498         if (!tcp_m)
2499                 tcp_m = &rte_flow_item_tcp_mask;
2500         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2501                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2502         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2503                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2505                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2506         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2507                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2508 }
2509
2510 /**
2511  * Add UDP item to matcher and to the value.
2512  *
2513  * @param[in, out] matcher
2514  *   Flow matcher.
2515  * @param[in, out] key
2516  *   Flow matcher value.
2517  * @param[in] item
2518  *   Flow pattern to translate.
2519  * @param[in] inner
2520  *   Item is inner pattern.
2521  */
2522 static void
2523 flow_dv_translate_item_udp(void *matcher, void *key,
2524                            const struct rte_flow_item *item,
2525                            int inner)
2526 {
2527         const struct rte_flow_item_udp *udp_m = item->mask;
2528         const struct rte_flow_item_udp *udp_v = item->spec;
2529         void *headers_m;
2530         void *headers_v;
2531
2532         if (inner) {
2533                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2534                                          inner_headers);
2535                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2536         } else {
2537                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2538                                          outer_headers);
2539                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2540         }
2541         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2542         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2543         if (!udp_v)
2544                 return;
2545         if (!udp_m)
2546                 udp_m = &rte_flow_item_udp_mask;
2547         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2548                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2549         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2550                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2551         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2552                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2553         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2554                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2555 }
2556
2557 /**
2558  * Add GRE item to matcher and to the value.
2559  *
2560  * @param[in, out] matcher
2561  *   Flow matcher.
2562  * @param[in, out] key
2563  *   Flow matcher value.
2564  * @param[in] item
2565  *   Flow pattern to translate.
2566  * @param[in] inner
2567  *   Item is inner pattern.
2568  */
2569 static void
2570 flow_dv_translate_item_gre(void *matcher, void *key,
2571                            const struct rte_flow_item *item,
2572                            int inner)
2573 {
2574         const struct rte_flow_item_gre *gre_m = item->mask;
2575         const struct rte_flow_item_gre *gre_v = item->spec;
2576         void *headers_m;
2577         void *headers_v;
2578         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2579         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2580
2581         if (inner) {
2582                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2583                                          inner_headers);
2584                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2585         } else {
2586                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2587                                          outer_headers);
2588                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2589         }
2590         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2591         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2592         if (!gre_v)
2593                 return;
2594         if (!gre_m)
2595                 gre_m = &rte_flow_item_gre_mask;
2596         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2597                  rte_be_to_cpu_16(gre_m->protocol));
2598         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2599                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2600 }
2601
2602 /**
2603  * Add NVGRE item to matcher and to the value.
2604  *
2605  * @param[in, out] matcher
2606  *   Flow matcher.
2607  * @param[in, out] key
2608  *   Flow matcher value.
2609  * @param[in] item
2610  *   Flow pattern to translate.
2611  * @param[in] inner
2612  *   Item is inner pattern.
2613  */
2614 static void
2615 flow_dv_translate_item_nvgre(void *matcher, void *key,
2616                              const struct rte_flow_item *item,
2617                              int inner)
2618 {
2619         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2620         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2621         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2622         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2623         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2624         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2625         char *gre_key_m;
2626         char *gre_key_v;
2627         int size;
2628         int i;
2629
2630         flow_dv_translate_item_gre(matcher, key, item, inner);
2631         if (!nvgre_v)
2632                 return;
2633         if (!nvgre_m)
2634                 nvgre_m = &rte_flow_item_nvgre_mask;
2635         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2636         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2637         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2638         memcpy(gre_key_m, tni_flow_id_m, size);
2639         for (i = 0; i < size; ++i)
2640                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2641 }
2642
2643 /**
2644  * Add VXLAN item to matcher and to the value.
2645  *
2646  * @param[in, out] matcher
2647  *   Flow matcher.
2648  * @param[in, out] key
2649  *   Flow matcher value.
2650  * @param[in] item
2651  *   Flow pattern to translate.
2652  * @param[in] inner
2653  *   Item is inner pattern.
2654  */
2655 static void
2656 flow_dv_translate_item_vxlan(void *matcher, void *key,
2657                              const struct rte_flow_item *item,
2658                              int inner)
2659 {
2660         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2661         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2662         void *headers_m;
2663         void *headers_v;
2664         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2665         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2666         char *vni_m;
2667         char *vni_v;
2668         uint16_t dport;
2669         int size;
2670         int i;
2671
2672         if (inner) {
2673                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2674                                          inner_headers);
2675                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2676         } else {
2677                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2678                                          outer_headers);
2679                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2680         }
2681         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2682                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2683         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2684                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2685                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2686         }
2687         if (!vxlan_v)
2688                 return;
2689         if (!vxlan_m)
2690                 vxlan_m = &rte_flow_item_vxlan_mask;
2691         size = sizeof(vxlan_m->vni);
2692         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2693         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2694         memcpy(vni_m, vxlan_m->vni, size);
2695         for (i = 0; i < size; ++i)
2696                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2697 }
2698
2699 /**
2700  * Add MPLS item to matcher and to the value.
2701  *
2702  * @param[in, out] matcher
2703  *   Flow matcher.
2704  * @param[in, out] key
2705  *   Flow matcher value.
2706  * @param[in] item
2707  *   Flow pattern to translate.
2708  * @param[in] prev_layer
2709  *   The protocol layer indicated in previous item.
2710  * @param[in] inner
2711  *   Item is inner pattern.
2712  */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/*
	 * First: pin the encapsulating protocol according to the layer that
	 * preceded the MPLS item (UDP dport, GRE protocol, or IP protocol).
	 */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 ETHER_TYPE_MPLS);
		break;
	default:
		/* MPLS carried directly over IP. */
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	/* No spec: only the encapsulation match above applies. */
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/*
	 * Second: pick the misc2 field that holds the MPLS label for this
	 * encapsulation; it must mirror the first switch.
	 */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Unsupported combinations leave the pointers NULL: skip silently. */
	if (out_mpls_m && out_mpls_v) {
		/* The value must be in the range of the mask. */
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
2788
/**
 * Add META item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
2801 static void
2802 flow_dv_translate_item_meta(void *matcher, void *key,
2803                             const struct rte_flow_item *item)
2804 {
2805         const struct rte_flow_item_meta *meta_m;
2806         const struct rte_flow_item_meta *meta_v;
2807         void *misc2_m =
2808                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2809         void *misc2_v =
2810                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2811
2812         meta_m = (const void *)item->mask;
2813         if (!meta_m)
2814                 meta_m = &rte_flow_item_meta_mask;
2815         meta_v = (const void *)item->spec;
2816         if (meta_v) {
2817                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2818                          rte_be_to_cpu_32(meta_m->data));
2819                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2820                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
2821         }
2822 }
2823
/* All-zero reference buffer used to detect unset match criteria sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to non-zero when the given headers section of the criteria
 * is entirely zero (i.e. nothing is matched in that section).
 */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
2830 /**
2831  * Calculate flow matcher enable bitmap.
2832  *
2833  * @param match_criteria
2834  *   Pointer to flow matcher criteria.
2835  *
2836  * @return
2837  *   Bitmap of enabled fields.
2838  */
2839 static uint8_t
2840 flow_dv_matcher_enable(uint32_t *match_criteria)
2841 {
2842         uint8_t match_criteria_enable;
2843
2844         match_criteria_enable =
2845                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2846                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2847         match_criteria_enable |=
2848                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2849                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2850         match_criteria_enable |=
2851                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2852                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2853         match_criteria_enable |=
2854                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2855                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2856 #ifdef HAVE_MLX5DV_DR
2857         match_criteria_enable |=
2858                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2859                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2860 #endif
2861         return match_criteria_enable;
2862 }
2863
2864
/**
 * Get a flow table.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] table_id
 *   Table id to use.
 * @param[in] egress
 *   Direction of the table.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   Returns tables resource based on the index, NULL in case of failed.
 */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_tbl_resource *tbl;

#ifdef HAVE_MLX5DV_DR
	/* Tables are cached per direction; the DR object is created lazily
	 * on first use and shared afterwards.
	 */
	if (egress) {
		tbl = &priv->tx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(priv->tx_ns, table_id);
	} else {
		tbl = &priv->rx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(priv->rx_ns, table_id);
	}
	if (!tbl->obj) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create table");
		return NULL;
	}
	/* Each successful get takes a reference; the caller is expected to
	 * balance it with flow_dv_tbl_resource_release().
	 */
	rte_atomic32_inc(&tbl->refcnt);
	return tbl;
#else
	/* Without DR support return the static entry as-is: no object is
	 * created and no reference counting is performed.
	 */
	(void)error;
	(void)tbl;
	if (egress)
		return &priv->tx_tbl[table_id];
	else
		return &priv->rx_tbl[table_id];
#endif
}
2917
2918 /**
2919  * Release a flow table.
2920  *
2921  * @param[in] tbl
2922  *   Table resource to be released.
2923  *
2924  * @return
2925  *   Returns 0 if table was released, else return 1;
2926  */
2927 static int
2928 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2929 {
2930         if (!tbl)
2931                 return 0;
2932         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2933                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2934                 tbl->obj = NULL;
2935                 return 0;
2936         }
2937         return 1;
2938 }
2939
/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &priv->matchers, next) {
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    matcher->group == cache_matcher->group &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			/* Reuse the cached matcher: just take a reference. */
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	/* Get (or create) the flow table this matcher attaches to. */
	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
				       matcher->egress, error);
	if (!tbl) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create table");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(priv->sh->ctx, &dv_attr,
						  tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		/* Drop the table reference taken above (DR build only). */
		flow_dv_tbl_resource_release(tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	/* NOTE(review): in the DR build flow_dv_tbl_resource_get() already
	 * incremented tbl->refcnt, so this second increment leaves each new
	 * matcher holding two table references - verify the matcher release
	 * path balances both.
	 */
	rte_atomic32_inc(&tbl->refcnt);
	return 0;
}
3031
3032 /**
3033  * Add source vport match to the specified matcher.
3034  *
3035  * @param[in, out] matcher
3036  *   Flow matcher.
3037  * @param[in, out] key
3038  *   Flow matcher value.
3039  * @param[in] port
3040  *   Source vport value to match
3041  * @param[in] mask
3042  *   Mask
3043  */
3044 static void
3045 flow_dv_translate_source_vport(void *matcher, void *key,
3046                               int16_t port, uint16_t mask)
3047 {
3048         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3049         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3050
3051         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3052         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3053 }
3054
/**
 * Find existing tag resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to tag resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_tag_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_flow_dv_tag_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &priv->tags, next) {
		if (resource->tag == cache_resource->tag) {
			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Share the cached action; take a reference. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->flow->tag_resource = cache_resource;
			return 0;
		}
	}
	/* Register new resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
		(resource->tag);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts with a single reference held by this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&priv->tags, cache_resource, next);
	dev_flow->flow->tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
3115
3116 /**
3117  * Release the tag.
3118  *
3119  * @param dev
3120  *   Pointer to Ethernet device.
3121  * @param flow
3122  *   Pointer to mlx5_flow.
3123  *
3124  * @return
3125  *   1 while a reference on it exists, 0 when freed.
3126  */
3127 static int
3128 flow_dv_tag_release(struct rte_eth_dev *dev,
3129                     struct mlx5_flow_dv_tag_resource *tag)
3130 {
3131         assert(tag);
3132         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3133                 dev->data->port_id, (void *)tag,
3134                 rte_atomic32_read(&tag->refcnt));
3135         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3136                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3137                 LIST_REMOVE(tag, next);
3138                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3139                         dev->data->port_id, (void *)tag);
3140                 rte_free(tag);
3141                 return 0;
3142         }
3143         return 1;
3144 }
3145
3146 /**
3147  * Fill the flow with DV spec.
3148  *
3149  * @param[in] dev
3150  *   Pointer to rte_eth_dev structure.
3151  * @param[in, out] dev_flow
3152  *   Pointer to the sub flow.
3153  * @param[in] attr
3154  *   Pointer to the flow attributes.
3155  * @param[in] items
3156  *   Pointer to the list of items.
3157  * @param[in] actions
3158  *   Pointer to the list of actions.
3159  * @param[out] error
3160  *   Pointer to the error structure.
3161  *
3162  * @return
3163  *   0 on success, a negative errno value otherwise and rte_errno is set.
3164  */
3165 static int
3166 flow_dv_translate(struct rte_eth_dev *dev,
3167                   struct mlx5_flow *dev_flow,
3168                   const struct rte_flow_attr *attr,
3169                   const struct rte_flow_item items[],
3170                   const struct rte_flow_action actions[],
3171                   struct rte_flow_error *error)
3172 {
3173         struct mlx5_priv *priv = dev->data->dev_private;
3174         struct rte_flow *flow = dev_flow->flow;
3175         uint64_t item_flags = 0;
3176         uint64_t last_item = 0;
3177         uint64_t action_flags = 0;
3178         uint64_t priority = attr->priority;
3179         struct mlx5_flow_dv_matcher matcher = {
3180                 .mask = {
3181                         .size = sizeof(matcher.mask.buf),
3182                 },
3183         };
3184         int actions_n = 0;
3185         bool actions_end = false;
3186         struct mlx5_flow_dv_modify_hdr_resource res = {
3187                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3188                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3189         };
3190         union flow_dv_attr flow_attr = { .attr = 0 };
3191         struct mlx5_flow_dv_tag_resource tag_resource;
3192
3193         if (priority == MLX5_FLOW_PRIO_RSVD)
3194                 priority = priv->config.flow_prio - 1;
3195         for (; !actions_end ; actions++) {
3196                 const struct rte_flow_action_queue *queue;
3197                 const struct rte_flow_action_rss *rss;
3198                 const struct rte_flow_action *action = actions;
3199                 const struct rte_flow_action_count *count = action->conf;
3200                 const uint8_t *rss_key;
3201                 const struct rte_flow_action_jump *jump_data;
3202                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3203                 struct mlx5_flow_tbl_resource *tbl;
3204
3205                 switch (actions->type) {
3206                 case RTE_FLOW_ACTION_TYPE_VOID:
3207                         break;
3208                 case RTE_FLOW_ACTION_TYPE_FLAG:
3209                         tag_resource.tag =
3210                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3211                         if (!flow->tag_resource)
3212                                 if (flow_dv_tag_resource_register
3213                                     (dev, &tag_resource, dev_flow, error))
3214                                         return errno;
3215                         dev_flow->dv.actions[actions_n++] =
3216                                 flow->tag_resource->action;
3217                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3218                         break;
3219                 case RTE_FLOW_ACTION_TYPE_MARK:
3220                         tag_resource.tag = mlx5_flow_mark_set
3221                               (((const struct rte_flow_action_mark *)
3222                                (actions->conf))->id);
3223                         if (!flow->tag_resource)
3224                                 if (flow_dv_tag_resource_register
3225                                     (dev, &tag_resource, dev_flow, error))
3226                                         return errno;
3227                         dev_flow->dv.actions[actions_n++] =
3228                                 flow->tag_resource->action;
3229                         action_flags |= MLX5_FLOW_ACTION_MARK;
3230                         break;
3231                 case RTE_FLOW_ACTION_TYPE_DROP:
3232                         action_flags |= MLX5_FLOW_ACTION_DROP;
3233                         break;
3234                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3235                         queue = actions->conf;
3236                         flow->rss.queue_num = 1;
3237                         (*flow->queue)[0] = queue->index;
3238                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3239                         break;
3240                 case RTE_FLOW_ACTION_TYPE_RSS:
3241                         rss = actions->conf;
3242                         if (flow->queue)
3243                                 memcpy((*flow->queue), rss->queue,
3244                                        rss->queue_num * sizeof(uint16_t));
3245                         flow->rss.queue_num = rss->queue_num;
3246                         /* NULL RSS key indicates default RSS key. */
3247                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3248                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3249                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3250                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3251                         flow->rss.level = rss->level;
3252                         action_flags |= MLX5_FLOW_ACTION_RSS;
3253                         break;
3254                 case RTE_FLOW_ACTION_TYPE_COUNT:
3255                         if (!priv->config.devx) {
3256                                 rte_errno = ENOTSUP;
3257                                 goto cnt_err;
3258                         }
3259                         flow->counter = flow_dv_counter_new(dev, count->shared,
3260                                                             count->id);
3261                         if (flow->counter == NULL)
3262                                 goto cnt_err;
3263                         dev_flow->dv.actions[actions_n++] =
3264                                 flow->counter->action;
3265                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3266                         break;
3267 cnt_err:
3268                         if (rte_errno == ENOTSUP)
3269                                 return rte_flow_error_set
3270                                               (error, ENOTSUP,
3271                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3272                                                NULL,
3273                                                "count action not supported");
3274                         else
3275                                 return rte_flow_error_set
3276                                                 (error, rte_errno,
3277                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3278                                                  action,
3279                                                  "cannot create counter"
3280                                                   " object.");
3281                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3282                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3283                         if (flow_dv_create_action_l2_encap(dev, actions,
3284                                                            dev_flow, error))
3285                                 return -rte_errno;
3286                         dev_flow->dv.actions[actions_n++] =
3287                                 dev_flow->dv.encap_decap->verbs_action;
3288                         action_flags |= actions->type ==
3289                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3290                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3291                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3292                         break;
3293                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3294                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3295                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3296                                                            error))
3297                                 return -rte_errno;
3298                         dev_flow->dv.actions[actions_n++] =
3299                                 dev_flow->dv.encap_decap->verbs_action;
3300                         action_flags |= actions->type ==
3301                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3302                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3303                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3304                         break;
3305                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3306                         /* Handle encap with preceding decap. */
3307                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3308                                 if (flow_dv_create_action_raw_encap
3309                                         (dev, actions, dev_flow, attr, error))
3310                                         return -rte_errno;
3311                                 dev_flow->dv.actions[actions_n++] =
3312                                         dev_flow->dv.encap_decap->verbs_action;
3313                         } else {
3314                                 /* Handle encap without preceding decap. */
3315                                 if (flow_dv_create_action_l2_encap(dev, actions,
3316                                                                    dev_flow,
3317                                                                    error))
3318                                         return -rte_errno;
3319                                 dev_flow->dv.actions[actions_n++] =
3320                                         dev_flow->dv.encap_decap->verbs_action;
3321                         }
3322                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3323                         break;
3324                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3325                         /* Check if this decap is followed by encap. */
3326                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3327                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3328                                action++) {
3329                         }
3330                         /* Handle decap only if it isn't followed by encap. */
3331                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3332                                 if (flow_dv_create_action_l2_decap(dev,
3333                                                                    dev_flow,
3334                                                                    error))
3335                                         return -rte_errno;
3336                                 dev_flow->dv.actions[actions_n++] =
3337                                         dev_flow->dv.encap_decap->verbs_action;
3338                         }
3339                         /* If decap is followed by encap, handle it at encap. */
3340                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3341                         break;
3342                 case RTE_FLOW_ACTION_TYPE_JUMP:
3343                         jump_data = action->conf;
3344                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3345                                                        MLX5_GROUP_FACTOR,
3346                                                        attr->egress, error);
3347                         if (!tbl)
3348                                 return rte_flow_error_set
3349                                                 (error, errno,
3350                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3351                                                  NULL,
3352                                                  "cannot create jump action.");
3353                         jump_tbl_resource.tbl = tbl;
3354                         if (flow_dv_jump_tbl_resource_register
3355                             (dev, &jump_tbl_resource, dev_flow, error)) {
3356                                 flow_dv_tbl_resource_release(tbl);
3357                                 return rte_flow_error_set
3358                                                 (error, errno,
3359                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3360                                                  NULL,
3361                                                  "cannot create jump action.");
3362                         }
3363                         dev_flow->dv.actions[actions_n++] =
3364                                 dev_flow->dv.jump->action;
3365                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3366                         break;
3367                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3368                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3369                         if (flow_dv_convert_action_modify_mac(&res, actions,
3370                                                               error))
3371                                 return -rte_errno;
3372                         action_flags |= actions->type ==
3373                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3374                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3375                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3376                         break;
3377                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3378                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3379                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3380                                                                error))
3381                                 return -rte_errno;
3382                         action_flags |= actions->type ==
3383                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3384                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3385                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3386                         break;
3387                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3388                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3389                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3390                                                                error))
3391                                 return -rte_errno;
3392                         action_flags |= actions->type ==
3393                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3394                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3395                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3396                         break;
3397                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3398                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3399                         if (flow_dv_convert_action_modify_tp(&res, actions,
3400                                                              items, &flow_attr,
3401                                                              error))
3402                                 return -rte_errno;
3403                         action_flags |= actions->type ==
3404                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3405                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3406                                         MLX5_FLOW_ACTION_SET_TP_DST;
3407                         break;
3408                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3409                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3410                                                                   &flow_attr,
3411                                                                   error))
3412                                 return -rte_errno;
3413                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3414                         break;
3415                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3416                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3417                                                              items, &flow_attr,
3418                                                              error))
3419                                 return -rte_errno;
3420                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3421                         break;
3422                 case RTE_FLOW_ACTION_TYPE_END:
3423                         actions_end = true;
3424                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3425                                 /* create modify action if needed. */
3426                                 if (flow_dv_modify_hdr_resource_register
3427                                                                 (dev, &res,
3428                                                                  dev_flow,
3429                                                                  error))
3430                                         return -rte_errno;
3431                                 dev_flow->dv.actions[actions_n++] =
3432                                         dev_flow->dv.modify_hdr->verbs_action;
3433                         }
3434                         break;
3435                 default:
3436                         break;
3437                 }
3438         }
3439         dev_flow->dv.actions_n = actions_n;
3440         flow->actions = action_flags;
3441         if (attr->ingress && !attr->transfer &&
3442             (priv->representor || priv->master)) {
3443                 /* It was validated - we support unidirection flows only. */
3444                 assert(!attr->egress);
3445                 /*
3446                  * Add matching on source vport index only
3447                  * for ingress rules in E-Switch configurations.
3448                  */
3449                 flow_dv_translate_source_vport(matcher.mask.buf,
3450                                                dev_flow->dv.value.buf,
3451                                                priv->vport_id,
3452                                                0xffff);
3453         }
3454         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3455                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3456                 void *match_mask = matcher.mask.buf;
3457                 void *match_value = dev_flow->dv.value.buf;
3458
3459                 switch (items->type) {
3460                 case RTE_FLOW_ITEM_TYPE_ETH:
3461                         flow_dv_translate_item_eth(match_mask, match_value,
3462                                                    items, tunnel);
3463                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3464                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3465                                              MLX5_FLOW_LAYER_OUTER_L2;
3466                         break;
3467                 case RTE_FLOW_ITEM_TYPE_VLAN:
3468                         flow_dv_translate_item_vlan(match_mask, match_value,
3469                                                     items, tunnel);
3470                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3471                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3472                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3473                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3474                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3475                         break;
3476                 case RTE_FLOW_ITEM_TYPE_IPV4:
3477                         flow_dv_translate_item_ipv4(match_mask, match_value,
3478                                                     items, tunnel, attr->group);
3479                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3480                         dev_flow->dv.hash_fields |=
3481                                 mlx5_flow_hashfields_adjust
3482                                         (dev_flow, tunnel,
3483                                          MLX5_IPV4_LAYER_TYPES,
3484                                          MLX5_IPV4_IBV_RX_HASH);
3485                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3486                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3487                         break;
3488                 case RTE_FLOW_ITEM_TYPE_IPV6:
3489                         flow_dv_translate_item_ipv6(match_mask, match_value,
3490                                                     items, tunnel, attr->group);
3491                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3492                         dev_flow->dv.hash_fields |=
3493                                 mlx5_flow_hashfields_adjust
3494                                         (dev_flow, tunnel,
3495                                          MLX5_IPV6_LAYER_TYPES,
3496                                          MLX5_IPV6_IBV_RX_HASH);
3497                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3498                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3499                         break;
3500                 case RTE_FLOW_ITEM_TYPE_TCP:
3501                         flow_dv_translate_item_tcp(match_mask, match_value,
3502                                                    items, tunnel);
3503                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3504                         dev_flow->dv.hash_fields |=
3505                                 mlx5_flow_hashfields_adjust
3506                                         (dev_flow, tunnel, ETH_RSS_TCP,
3507                                          IBV_RX_HASH_SRC_PORT_TCP |
3508                                          IBV_RX_HASH_DST_PORT_TCP);
3509                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3510                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3511                         break;
3512                 case RTE_FLOW_ITEM_TYPE_UDP:
3513                         flow_dv_translate_item_udp(match_mask, match_value,
3514                                                    items, tunnel);
3515                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3516                         dev_flow->dv.hash_fields |=
3517                                 mlx5_flow_hashfields_adjust
3518                                         (dev_flow, tunnel, ETH_RSS_UDP,
3519                                          IBV_RX_HASH_SRC_PORT_UDP |
3520                                          IBV_RX_HASH_DST_PORT_UDP);
3521                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3522                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3523                         break;
3524                 case RTE_FLOW_ITEM_TYPE_GRE:
3525                         flow_dv_translate_item_gre(match_mask, match_value,
3526                                                    items, tunnel);
3527                         last_item = MLX5_FLOW_LAYER_GRE;
3528                         break;
3529                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3530                         flow_dv_translate_item_nvgre(match_mask, match_value,
3531                                                      items, tunnel);
3532                         last_item = MLX5_FLOW_LAYER_GRE;
3533                         break;
3534                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3535                         flow_dv_translate_item_vxlan(match_mask, match_value,
3536                                                      items, tunnel);
3537                         last_item = MLX5_FLOW_LAYER_VXLAN;
3538                         break;
3539                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3540                         flow_dv_translate_item_vxlan(match_mask, match_value,
3541                                                      items, tunnel);
3542                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3543                         break;
3544                 case RTE_FLOW_ITEM_TYPE_MPLS:
3545                         flow_dv_translate_item_mpls(match_mask, match_value,
3546                                                     items, last_item, tunnel);
3547                         last_item = MLX5_FLOW_LAYER_MPLS;
3548                         break;
3549                 case RTE_FLOW_ITEM_TYPE_META:
3550                         flow_dv_translate_item_meta(match_mask, match_value,
3551                                                     items);
3552                         last_item = MLX5_FLOW_ITEM_METADATA;
3553                         break;
3554                 default:
3555                         break;
3556                 }
3557                 item_flags |= last_item;
3558         }
3559         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3560                                          dev_flow->dv.value.buf));
3561         dev_flow->layers = item_flags;
3562         /* Register matcher. */
3563         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3564                                     matcher.mask.size);
3565         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3566                                                      matcher.priority);
3567         matcher.egress = attr->egress;
3568         matcher.group = attr->group;
3569         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3570                 return -rte_errno;
3571         return 0;
3572 }
3573
3574 /**
3575  * Apply the flow to the NIC.
3576  *
3577  * @param[in] dev
3578  *   Pointer to the Ethernet device structure.
3579  * @param[in, out] flow
3580  *   Pointer to flow structure.
3581  * @param[out] error
3582  *   Pointer to error structure.
3583  *
3584  * @return
3585  *   0 on success, a negative errno value otherwise and rte_errno is set.
3586  */
3587 static int
3588 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3589               struct rte_flow_error *error)
3590 {
3591         struct mlx5_flow_dv *dv;
3592         struct mlx5_flow *dev_flow;
3593         int n;
3594         int err;
3595
3596         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3597                 dv = &dev_flow->dv;
3598                 n = dv->actions_n;
3599                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3600                         dv->hrxq = mlx5_hrxq_drop_new(dev);
3601                         if (!dv->hrxq) {
3602                                 rte_flow_error_set
3603                                         (error, errno,
3604                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3605                                          "cannot get drop hash queue");
3606                                 goto error;
3607                         }
3608                         dv->actions[n++] =
3609                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3610                                 (dv->hrxq->qp);
3611                 } else if (flow->actions &
3612                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3613                         struct mlx5_hrxq *hrxq;
3614
3615                         hrxq = mlx5_hrxq_get(dev, flow->key,
3616                                              MLX5_RSS_HASH_KEY_LEN,
3617                                              dv->hash_fields,
3618                                              (*flow->queue),
3619                                              flow->rss.queue_num);
3620                         if (!hrxq)
3621                                 hrxq = mlx5_hrxq_new
3622                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3623                                          dv->hash_fields, (*flow->queue),
3624                                          flow->rss.queue_num,
3625                                          !!(dev_flow->layers &
3626                                             MLX5_FLOW_LAYER_TUNNEL));
3627                         if (!hrxq) {
3628                                 rte_flow_error_set
3629                                         (error, rte_errno,
3630                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3631                                          "cannot get hash queue");
3632                                 goto error;
3633                         }
3634                         dv->hrxq = hrxq;
3635                         dv->actions[n++] =
3636                                 mlx5_glue->dv_create_flow_action_dest_ibv_qp
3637                                 (dv->hrxq->qp);
3638                 }
3639                 dv->flow =
3640                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3641                                                   (void *)&dv->value, n,
3642                                                   dv->actions);
3643                 if (!dv->flow) {
3644                         rte_flow_error_set(error, errno,
3645                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3646                                            NULL,
3647                                            "hardware refuses to create flow");
3648                         goto error;
3649                 }
3650         }
3651         return 0;
3652 error:
3653         err = rte_errno; /* Save rte_errno before cleanup. */
3654         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3655                 struct mlx5_flow_dv *dv = &dev_flow->dv;
3656                 if (dv->hrxq) {
3657                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3658                                 mlx5_hrxq_drop_release(dev);
3659                         else
3660                                 mlx5_hrxq_release(dev, dv->hrxq);
3661                         dv->hrxq = NULL;
3662                 }
3663         }
3664         rte_errno = err; /* Restore rte_errno. */
3665         return -rte_errno;
3666 }
3667
3668 /**
3669  * Release the flow matcher.
3670  *
3671  * @param dev
3672  *   Pointer to Ethernet device.
3673  * @param flow
3674  *   Pointer to mlx5_flow.
3675  *
3676  * @return
3677  *   1 while a reference on it exists, 0 when freed.
3678  */
3679 static int
3680 flow_dv_matcher_release(struct rte_eth_dev *dev,
3681                         struct mlx5_flow *flow)
3682 {
3683         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
3684         struct mlx5_priv *priv = dev->data->dev_private;
3685         struct mlx5_flow_tbl_resource *tbl;
3686
3687         assert(matcher->matcher_object);
3688         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
3689                 dev->data->port_id, (void *)matcher,
3690                 rte_atomic32_read(&matcher->refcnt));
3691         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
3692                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
3693                            (matcher->matcher_object));
3694                 LIST_REMOVE(matcher, next);
3695                 if (matcher->egress)
3696                         tbl = &priv->tx_tbl[matcher->group];
3697                 else
3698                         tbl = &priv->rx_tbl[matcher->group];
3699                 flow_dv_tbl_resource_release(tbl);
3700                 rte_free(matcher);
3701                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
3702                         dev->data->port_id, (void *)matcher);
3703                 return 0;
3704         }
3705         return 1;
3706 }
3707
3708 /**
3709  * Release an encap/decap resource.
3710  *
3711  * @param flow
3712  *   Pointer to mlx5_flow.
3713  *
3714  * @return
3715  *   1 while a reference on it exists, 0 when freed.
3716  */
3717 static int
3718 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
3719 {
3720         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
3721                                                 flow->dv.encap_decap;
3722
3723         assert(cache_resource->verbs_action);
3724         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
3725                 (void *)cache_resource,
3726                 rte_atomic32_read(&cache_resource->refcnt));
3727         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3728                 claim_zero(mlx5_glue->destroy_flow_action
3729                                 (cache_resource->verbs_action));
3730                 LIST_REMOVE(cache_resource, next);
3731                 rte_free(cache_resource);
3732                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
3733                         (void *)cache_resource);
3734                 return 0;
3735         }
3736         return 1;
3737 }
3738
3739 /**
 * Release a jump-to-table action resource.
3741  *
3742  * @param flow
3743  *   Pointer to mlx5_flow.
3744  *
3745  * @return
3746  *   1 while a reference on it exists, 0 when freed.
3747  */
3748 static int
3749 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
3750 {
3751         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
3752                                                 flow->dv.jump;
3753
3754         assert(cache_resource->action);
3755         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
3756                 (void *)cache_resource,
3757                 rte_atomic32_read(&cache_resource->refcnt));
3758         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3759                 claim_zero(mlx5_glue->destroy_flow_action
3760                                 (cache_resource->action));
3761                 LIST_REMOVE(cache_resource, next);
3762                 flow_dv_tbl_resource_release(cache_resource->tbl);
3763                 rte_free(cache_resource);
3764                 DRV_LOG(DEBUG, "jump table resource %p: removed",
3765                         (void *)cache_resource);
3766                 return 0;
3767         }
3768         return 1;
3769 }
3770
3771 /**
3772  * Release a modify-header resource.
3773  *
3774  * @param flow
3775  *   Pointer to mlx5_flow.
3776  *
3777  * @return
3778  *   1 while a reference on it exists, 0 when freed.
3779  */
3780 static int
3781 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
3782 {
3783         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
3784                                                 flow->dv.modify_hdr;
3785
3786         assert(cache_resource->verbs_action);
3787         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
3788                 (void *)cache_resource,
3789                 rte_atomic32_read(&cache_resource->refcnt));
3790         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
3791                 claim_zero(mlx5_glue->destroy_flow_action
3792                                 (cache_resource->verbs_action));
3793                 LIST_REMOVE(cache_resource, next);
3794                 rte_free(cache_resource);
3795                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
3796                         (void *)cache_resource);
3797                 return 0;
3798         }
3799         return 1;
3800 }
3801
3802 /**
3803  * Remove the flow from the NIC but keeps it in memory.
3804  *
3805  * @param[in] dev
3806  *   Pointer to Ethernet device.
3807  * @param[in, out] flow
3808  *   Pointer to flow structure.
3809  */
3810 static void
3811 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
3812 {
3813         struct mlx5_flow_dv *dv;
3814         struct mlx5_flow *dev_flow;
3815
3816         if (!flow)
3817                 return;
3818         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3819                 dv = &dev_flow->dv;
3820                 if (dv->flow) {
3821                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
3822                         dv->flow = NULL;
3823                 }
3824                 if (dv->hrxq) {
3825                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3826                                 mlx5_hrxq_drop_release(dev);
3827                         else
3828                                 mlx5_hrxq_release(dev, dv->hrxq);
3829                         dv->hrxq = NULL;
3830                 }
3831         }
3832 }
3833
3834 /**
3835  * Remove the flow from the NIC and the memory.
3836  *
3837  * @param[in] dev
3838  *   Pointer to the Ethernet device structure.
3839  * @param[in, out] flow
3840  *   Pointer to flow structure.
3841  */
3842 static void
3843 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
3844 {
3845         struct mlx5_flow *dev_flow;
3846
3847         if (!flow)
3848                 return;
3849         flow_dv_remove(dev, flow);
3850         if (flow->counter) {
3851                 flow_dv_counter_release(flow->counter);
3852                 flow->counter = NULL;
3853         }
3854         if (flow->tag_resource) {
3855                 flow_dv_tag_release(dev, flow->tag_resource);
3856                 flow->tag_resource = NULL;
3857         }
3858         while (!LIST_EMPTY(&flow->dev_flows)) {
3859                 dev_flow = LIST_FIRST(&flow->dev_flows);
3860                 LIST_REMOVE(dev_flow, next);
3861                 if (dev_flow->dv.matcher)
3862                         flow_dv_matcher_release(dev, dev_flow);
3863                 if (dev_flow->dv.encap_decap)
3864                         flow_dv_encap_decap_resource_release(dev_flow);
3865                 if (dev_flow->dv.modify_hdr)
3866                         flow_dv_modify_hdr_resource_release(dev_flow);
3867                 if (dev_flow->dv.jump)
3868                         flow_dv_jump_tbl_resource_release(dev_flow);
3869                 rte_free(dev_flow);
3870         }
3871 }
3872
3873 /**
 * Query a DV flow rule for its statistics via DevX.
3875  *
3876  * @param[in] dev
3877  *   Pointer to Ethernet device.
3878  * @param[in] flow
3879  *   Pointer to the sub flow.
3880  * @param[out] data
3881  *   data retrieved by the query.
3882  * @param[out] error
3883  *   Perform verbose error reporting if not NULL.
3884  *
3885  * @return
3886  *   0 on success, a negative errno value otherwise and rte_errno is set.
3887  */
3888 static int
3889 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
3890                     void *data, struct rte_flow_error *error)
3891 {
3892         struct mlx5_priv *priv = dev->data->dev_private;
3893         struct rte_flow_query_count *qc = data;
3894         uint64_t pkts = 0;
3895         uint64_t bytes = 0;
3896         int err;
3897
3898         if (!priv->config.devx)
3899                 return rte_flow_error_set(error, ENOTSUP,
3900                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3901                                           NULL,
3902                                           "counters are not supported");
3903         if (flow->counter) {
3904                 err = mlx5_devx_cmd_flow_counter_query
3905                                                 (flow->counter->dcs,
3906                                                  qc->reset, &pkts, &bytes);
3907                 if (err)
3908                         return rte_flow_error_set
3909                                 (error, err,
3910                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3911                                  NULL,
3912                                  "cannot read counters");
3913                 qc->hits_set = 1;
3914                 qc->bytes_set = 1;
3915                 qc->hits = pkts - flow->counter->hits;
3916                 qc->bytes = bytes - flow->counter->bytes;
3917                 if (qc->reset) {
3918                         flow->counter->hits = pkts;
3919                         flow->counter->bytes = bytes;
3920                 }
3921                 return 0;
3922         }
3923         return rte_flow_error_set(error, EINVAL,
3924                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3925                                   NULL,
3926                                   "counters are not available");
3927 }
3928
3929 /**
3930  * Query a flow.
3931  *
3932  * @see rte_flow_query()
3933  * @see rte_flow_ops
3934  */
3935 static int
3936 flow_dv_query(struct rte_eth_dev *dev,
3937               struct rte_flow *flow __rte_unused,
3938               const struct rte_flow_action *actions __rte_unused,
3939               void *data __rte_unused,
3940               struct rte_flow_error *error __rte_unused)
3941 {
3942         int ret = -EINVAL;
3943
3944         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3945                 switch (actions->type) {
3946                 case RTE_FLOW_ACTION_TYPE_VOID:
3947                         break;
3948                 case RTE_FLOW_ACTION_TYPE_COUNT:
3949                         ret = flow_dv_query_count(dev, flow, data, error);
3950                         break;
3951                 default:
3952                         return rte_flow_error_set(error, ENOTSUP,
3953                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3954                                                   actions,
3955                                                   "action not supported");
3956                 }
3957         }
3958         return ret;
3959 }
3960
3961
/* Driver ops table registering the Direct Verbs flow engine with the
 * generic mlx5 flow layer.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
};
3971
3972 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */