/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

/* VLAN header definitions */
#define MLX5DV_FLOW_VLAN_PCP_SHIFT 13
#define MLX5DV_FLOW_VLAN_PCP_MASK (0x7 << MLX5DV_FLOW_VLAN_PCP_SHIFT)
#define MLX5DV_FLOW_VLAN_VID_MASK 0x0fff
#define MLX5DV_FLOW_VLAN_PCP_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK)
#define MLX5DV_FLOW_VLAN_VID_MASK_BE RTE_BE16(MLX5DV_FLOW_VLAN_VID_MASK)

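/*
 * Worked example (annotation, not part of the driver): a VLAN TCI value
 * decomposes under the masks above as PCP in bits 15-13 and VID in bits
 * 11-0. For tci = 0xE00A:
 *
 *   pcp = (0xE00A & 0xE000) >> 13 = 7     (MLX5DV_FLOW_VLAN_PCP_MASK)
 *   vid =  0xE00A & 0x0fff       = 0x00A  (MLX5DV_FLOW_VLAN_VID_MASK)
 *
 * The _BE variants hold the same masks in network byte order so they can
 * be applied directly to the big-endian tci field of
 * struct rte_flow_item_vlan.
 */
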
union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}

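/*
 * Usage sketch (hypothetical pattern, for illustration only): given an
 * ETH / IPV4 / UDP item list, flow_dv_attr_init() leaves
 *
 *   union flow_dv_attr attr = { .attr = 0 };
 *   flow_dv_attr_init(items, &attr);
 *   assert(attr.valid && attr.ipv4 && attr.udp && !attr.tcp && !attr.ipv6);
 *
 * so the modify-header converters below can pick the matching protocol
 * table without re-scanning the item list.
 */
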
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_vlan_out_first_vid[] = {
	/* Size in bits !!! */
	{12, 0, MLX5_MODI_OUT_FIRST_VID},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item __rte_unused,
			  uint8_t next_protocol, uint64_t *item_flags,
			  int *tunnel)
{
	assert(item->type == RTE_FLOW_ITEM_TYPE_IPV4 ||
	       item->type == RTE_FLOW_ITEM_TYPE_IPV6);
	if (next_protocol == IPPROTO_IPIP) {
		*item_flags |= MLX5_FLOW_LAYER_IPIP;
		*tunnel = 1;
	}
	if (next_protocol == IPPROTO_IPV6) {
		*item_flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
		*tunnel = 1;
	}
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared DV context. The lock is taken only if the context is
 * actually shared, i.e. when we have a multiport IB device and
 * representors are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

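/*
 * Usage sketch (illustration, not a call sequence from this file): the
 * lock/unlock pair is expected to bracket any mutation of the shared DV
 * objects, e.g.
 *
 *   flow_d_shared_lock(dev);
 *   ... create or destroy tables/matchers on priv->sh ...
 *   flow_d_shared_unlock(dev);
 *
 * When dv_refcnt <= 1 only one port uses the context, so the mutex is
 * skipped and the calls degenerate to no-ops.
 */
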
/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}

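/*
 * Worked example (a sketch, assuming a set-IPv4-source action): with the
 * modify_ipv4 table the only non-zero mask segment is
 * {4, 12, MLX5_MODI_OUT_SIPV4}, so the loop above emits one command:
 *
 *   actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
 *   actions[i].field       = MLX5_MODI_OUT_SIPV4;
 *   actions[i].length      = 0;  (size == 4, 0 encodes a full 32-bit write)
 *   actions[i].data[0..3]  = new address, copied from spec[12..15]
 *
 * and data0 is byte-swapped to big-endian for the device.
 */
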
/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set VLAN VID action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_vlan_vid
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_of_set_vlan_vid *conf =
		(const struct rte_flow_action_of_set_vlan_vid *)(action->conf);
	int i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	struct field_modify_info *field = modify_vlan_out_first_vid;

	if (i >= MLX5_MODIFY_NUM)
		return rte_flow_error_set(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
			 "too many items to modify");
	actions[i].action_type = MLX5_MODIFICATION_TYPE_SET;
	actions[i].field = field->id;
	actions[i].length = field->size;
	actions[i].offset = field->offset;
	actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
	actions[i].data1 = conf->vlan_vid;
	actions[i].data1 = actions[i].data1 << 16;
	resource->actions_num = ++i;
	return 0;
}

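/*
 * Note on the data1 layout above (a reading of the code, not a spec
 * quote): MLX5_MODI_OUT_FIRST_VID is a 12-bit field (see
 * modify_vlan_out_first_vid), and its immediate value is expected in the
 * upper 16 bits of data1, hence the << 16 shift of the big-endian
 * conf->vlan_vid. Only data0 is byte-swapped here; data1 keeps the value
 * as assembled.
 */
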
/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

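/*
 * Arithmetic behind the UINT32_MAX trick above: the device adds modulo
 * 2^32, so adding X * UINT32_MAX is a decrement by X:
 *
 *   Y + X * (2^32 - 1) == Y - X   (mod 2^32)
 *
 * e.g. decrementing the sequence number by 1 becomes adding 0xFFFFFFFF,
 * whose 32-bit wrap-around yields Y - 1. The same trick is reused for
 * the TCP acknowledgment number below.
 */
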
/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
	uint64_t value = rte_be_to_cpu_32(*conf);
	struct rte_flow_item item;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;

	memset(&tcp, 0, sizeof(tcp));
	memset(&tcp_mask, 0, sizeof(tcp_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
		/*
		 * The HW has no decrement operation, only increment operation.
		 * To simulate decrement X from Y using increment operation
		 * we need to add UINT32_MAX X times to Y.
		 * Each adding of UINT32_MAX decrements Y by 1.
		 */
		value *= UINT32_MAX;
	tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
	tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
	item.type = RTE_FLOW_ITEM_TYPE_TCP;
	item.spec = &tcp;
	item.mask = &tcp_mask;
	return flow_dv_convert_modify_action(&item, modify_tcp, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "pattern not supported for ingress");
	return 0;
}

/**
 * Validate the port_id (vport) item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
			      const struct rte_flow_item *item,
			      const struct rte_flow_attr *attr,
			      uint64_t item_flags,
			      struct rte_flow_error *error)
{
	const struct rte_flow_item_port_id *spec = item->spec;
	const struct rte_flow_item_port_id *mask = item->mask;
	const struct rte_flow_item_port_id switch_mask = {
			.id = 0xffffffff,
	};
	uint16_t esw_domain_id;
	uint16_t item_port_esw_domain_id;
	int ret;

	if (!attr->transfer)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on port id is valid only"
					  " when transfer flag is enabled");
	if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ITEM, item,
					  "multiple source ports are not"
					  " supported");
	if (!mask)
		mask = &switch_mask;
	if (mask->id != 0xffffffff)
		return rte_flow_error_set(error, ENOTSUP,
					   RTE_FLOW_ERROR_TYPE_ITEM_MASK,
					   mask,
					   "no support for partial mask on"
					   " \"id\" field");
	ret = mlx5_flow_item_acceptable
				(item, (const uint8_t *)mask,
				 (const uint8_t *)&rte_flow_item_port_id_mask,
				 sizeof(struct rte_flow_item_port_id),
				 error);
	if (ret)
		return ret;
	if (!spec)
		return 0;
	ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
					NULL);
	if (ret)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "failed to obtain E-Switch info for"
					  " port");
	ret = mlx5_port_to_eswitch_info(dev->data->port_id,
					&esw_domain_id, NULL);
	if (ret < 0)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "failed to obtain E-Switch info");
	if (item_port_esw_domain_id != esw_domain_id)
		return rte_flow_error_set(error, -ret,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
					  "cannot match on a port from a"
					  " different E-Switch");
	return 0;
}

/**
 * Validate the pop VLAN action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the pop vlan action.
 * @param[in] item_flags
 *   The items found in this flow rule.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_pop_vlan(struct rte_eth_dev *dev,
				 uint64_t action_flags,
				 const struct rte_flow_action *action,
				 uint64_t item_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	(void)action;
	(void)attr;
	if (!priv->sh->pop_vlan_action)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "pop vlan action is not supported");
	/*
	 * Check for inconsistencies:
	 *  fail strip_vlan in a flow that matches packets without VLAN tags,
	 *  i.e. in a flow that does not explicitly match on the VLAN tag.
	 */
	if (action_flags & MLX5_FLOW_ACTION_OF_POP_VLAN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "no support for multiple vlan pop "
					  "actions");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "cannot pop vlan without a "
					  "match on (outer) vlan in the flow");
	return 0;
}

/**
 * Get VLAN default info from the VLAN match info in the item list.
 *
 * @param[in] items
 *   The list of item specifications.
 * @param[out] vlan
 *   Pointer to the VLAN info to fill.
 */
static void
flow_dev_get_vlan_info_from_items(const struct rte_flow_item *items,
				  struct rte_vlan_hdr *vlan)
{
	const struct rte_flow_item_vlan nic_mask = {
		.tci = RTE_BE16(MLX5DV_FLOW_VLAN_PCP_MASK |
				MLX5DV_FLOW_VLAN_VID_MASK),
		.inner_type = RTE_BE16(0xffff),
	};

	if (items == NULL)
		return;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END &&
	       items->type != RTE_FLOW_ITEM_TYPE_VLAN; items++)
		;
	if (items->type == RTE_FLOW_ITEM_TYPE_VLAN) {
		const struct rte_flow_item_vlan *vlan_m = items->mask;
		const struct rte_flow_item_vlan *vlan_v = items->spec;

		if (!vlan_m)
			vlan_m = &nic_mask;
		/* Only full match values are accepted */
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_PCP_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_PCP_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_PCP_MASK_BE);
		}
		if ((vlan_m->tci & MLX5DV_FLOW_VLAN_VID_MASK_BE) ==
		     MLX5DV_FLOW_VLAN_VID_MASK_BE) {
			vlan->vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
			vlan->vlan_tci |=
				rte_be_to_cpu_16(vlan_v->tci &
						 MLX5DV_FLOW_VLAN_VID_MASK_BE);
		}
		if (vlan_m->inner_type == nic_mask.inner_type)
			vlan->eth_proto = rte_be_to_cpu_16(vlan_v->inner_type &
							   vlan_m->inner_type);
	}
}

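/*
 * Example (hypothetical rule, for illustration): with a VLAN item of
 * spec .tci = RTE_BE16(0xE00A) and the default nic_mask, both branches
 * above take the full-match path, so a zero-initialized default VLAN
 * header ends up with PCP 7 and VID 10:
 *
 *   vlan->vlan_tci == (7 << MLX5DV_FLOW_VLAN_PCP_SHIFT) | 0x00A
 *
 * Partially-masked PCP or VID fields are left untouched, since only full
 * match values are accepted.
 */
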
/**
 * Validate the push VLAN action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_push_vlan(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_of_push_vlan *push_vlan = action->conf;

	if (push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_VLAN) &&
	    push_vlan->ethertype != RTE_BE16(RTE_ETHER_TYPE_QINQ))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "invalid vlan ethertype");
	if (action_flags &
		(MLX5_FLOW_ACTION_OF_POP_VLAN | MLX5_FLOW_ACTION_OF_PUSH_VLAN))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "no support for multiple VLAN "
					  "actions");
	(void)attr;
	return 0;
}

/**
 * Validate the set VLAN PCP.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_pcp(uint64_t action_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_pcp *conf = action->conf;

	if (conf->vlan_pcp > 7)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN PCP value is too big");
	if (mlx5_flow_find_action(actions,
				  RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN) == NULL)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set VLAN PCP can only be used "
					  "with push VLAN action");
	if (action_flags & MLX5_FLOW_ACTION_OF_PUSH_VLAN)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "set VLAN PCP action must precede "
					  "the push VLAN action");
	return 0;
}

/**
 * Validate the set VLAN VID.
 *
 * @param[in] item_flags
 *   Holds the items detected in this rule.
 * @param[in] actions
 *   Pointer to the list of actions remaining in the flow rule.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_set_vlan_vid(uint64_t item_flags,
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error)
{
	const struct rte_flow_action *action = actions;
	const struct rte_flow_action_of_set_vlan_vid *conf = action->conf;

	if (rte_be_to_cpu_16(conf->vlan_vid) > 0xFFE)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "VLAN VID value is too big");
	/* If a push VLAN action follows then it will handle this action */
	if (mlx5_flow_find_action(actions,
				  RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
		return 0;

	/*
	 * Action is on an existing VLAN header:
	 *    Need to verify this is a single modify VID action.
	 *    The rule must include a match on the outer VLAN.
	 */
	if (mlx5_flow_find_action(++action,
				  RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "Multiple VLAN VID modifications are "
					  "not supported");
	if (!(item_flags & MLX5_FLOW_LAYER_OUTER_VLAN))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "match on VLAN is required in order "
					  "to set VLAN VID");
	return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (!attr->transfer && attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	const struct rte_flow_action_raw_encap *raw_encap =
		(const struct rte_flow_action_raw_encap *)action->conf;

	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (!attr->transfer && attr->ingress &&
	    !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	if (!raw_encap->size || !raw_encap->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "raw encap data cannot be empty");
	return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " decap action");
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	/* decap action is valid on egress only if it is followed by encap */
	if (attr->egress) {
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		       action++) {
		}
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					 NULL, "decap action not supported"
					 " for egress");
	}
	return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
1338  * @param[in, out] dev
1339  *   Pointer to rte_eth_dev structure.
1340  * @param[in, out] resource
1341  *   Pointer to encap/decap resource.
1342  * @param[in, out] dev_flow
1343  *   Pointer to the dev_flow.
1344  * @param[out] error
1345  *   Pointer to error structure.
1346  *
1347  * @return
1348  *   0 on success, a negative errno value otherwise and rte_errno is set.
1349  */
1350 static int
1351 flow_dv_encap_decap_resource_register
1352                         (struct rte_eth_dev *dev,
1353                          struct mlx5_flow_dv_encap_decap_resource *resource,
1354                          struct mlx5_flow *dev_flow,
1355                          struct rte_flow_error *error)
1356 {
1357         struct mlx5_priv *priv = dev->data->dev_private;
1358         struct mlx5_ibv_shared *sh = priv->sh;
1359         struct mlx5_flow_dv_encap_decap_resource *cache_resource;
1360         struct rte_flow *flow = dev_flow->flow;
1361         struct mlx5dv_dr_domain *domain;
1362
1363         resource->flags = flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
1364         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1365                 domain = sh->fdb_domain;
1366         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1367                 domain = sh->rx_domain;
1368         else
1369                 domain = sh->tx_domain;
1370
1371         /* Lookup a matching resource from cache. */
1372         LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
1373                 if (resource->reformat_type == cache_resource->reformat_type &&
1374                     resource->ft_type == cache_resource->ft_type &&
1375                     resource->flags == cache_resource->flags &&
1376                     resource->size == cache_resource->size &&
1377                     !memcmp((const void *)resource->buf,
1378                             (const void *)cache_resource->buf,
1379                             resource->size)) {
1380                         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
1381                                 (void *)cache_resource,
1382                                 rte_atomic32_read(&cache_resource->refcnt));
1383                         rte_atomic32_inc(&cache_resource->refcnt);
1384                         dev_flow->dv.encap_decap = cache_resource;
1385                         return 0;
1386                 }
1387         }
1388         /* Register new encap/decap resource. */
1389         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1390         if (!cache_resource)
1391                 return rte_flow_error_set(error, ENOMEM,
1392                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1393                                           "cannot allocate resource memory");
1394         *cache_resource = *resource;
1395         cache_resource->verbs_action =
1396                 mlx5_glue->dv_create_flow_action_packet_reformat
1397                         (sh->ctx, cache_resource->reformat_type,
1398                          cache_resource->ft_type, domain, cache_resource->flags,
1399                          cache_resource->size,
1400                          (cache_resource->size ? cache_resource->buf : NULL));
1401         if (!cache_resource->verbs_action) {
1402                 rte_free(cache_resource);
1403                 return rte_flow_error_set(error, ENOMEM,
1404                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1405                                           NULL, "cannot create action");
1406         }
1407         rte_atomic32_init(&cache_resource->refcnt);
1408         rte_atomic32_inc(&cache_resource->refcnt);
1409         LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
1410         dev_flow->dv.encap_decap = cache_resource;
1411         DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
1412                 (void *)cache_resource,
1413                 rte_atomic32_read(&cache_resource->refcnt));
1414         return 0;
1415 }
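
/*
 * Caching note (illustrative): a resource is reused only when reformat
 * type, table type, root/non-root flags, buffer size and buffer contents
 * all match. Every reuse increments the refcount, which the matching
 * release path (assumed to be flow_dv_encap_decap_resource_release()
 * further down this file) decrements before the verbs action is finally
 * destroyed.
 */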
1416
1417 /**
1418  * Find existing table jump resource or create and register a new one.
1419  *
1420  * @param[in, out] dev
1421  *   Pointer to rte_eth_dev structure.
1422  * @param[in, out] resource
1423  *   Pointer to jump table resource.
1424  * @param[in, out] dev_flow
1425  *   Pointer to the dev_flow.
1426  * @param[out] error
1427  *   Pointer to error structure.
1428  *
1429  * @return
1430  *   0 on success, a negative errno value otherwise and rte_errno is set.
1431  */
1432 static int
1433 flow_dv_jump_tbl_resource_register
1434                         (struct rte_eth_dev *dev,
1435                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1436                          struct mlx5_flow *dev_flow,
1437                          struct rte_flow_error *error)
1438 {
1439         struct mlx5_priv *priv = dev->data->dev_private;
1440         struct mlx5_ibv_shared *sh = priv->sh;
1441         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1442
1443         /* Lookup a matching resource from cache. */
1444         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1445                 if (resource->tbl == cache_resource->tbl) {
1446                         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
1447                                 (void *)cache_resource,
1448                                 rte_atomic32_read(&cache_resource->refcnt));
1449                         rte_atomic32_inc(&cache_resource->refcnt);
1450                         dev_flow->dv.jump = cache_resource;
1451                         return 0;
1452                 }
1453         }
1454         /* Register new jump table resource. */
1455         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1456         if (!cache_resource)
1457                 return rte_flow_error_set(error, ENOMEM,
1458                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1459                                           "cannot allocate resource memory");
1460         *cache_resource = *resource;
1461         cache_resource->action =
1462                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1463                 (resource->tbl->obj);
1464         if (!cache_resource->action) {
1465                 rte_free(cache_resource);
1466                 return rte_flow_error_set(error, ENOMEM,
1467                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1468                                           NULL, "cannot create action");
1469         }
1470         rte_atomic32_init(&cache_resource->refcnt);
1471         rte_atomic32_inc(&cache_resource->refcnt);
1472         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1473         dev_flow->dv.jump = cache_resource;
1474         DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
1475                 (void *)cache_resource,
1476                 rte_atomic32_read(&cache_resource->refcnt));
1477         return 0;
1478 }
1479
1480 /**
1481  * Find existing table port ID resource or create and register a new one.
1482  *
1483  * @param[in, out] dev
1484  *   Pointer to rte_eth_dev structure.
1485  * @param[in, out] resource
1486  *   Pointer to port ID action resource.
1487  * @param[in, out] dev_flow
1488  *   Pointer to the dev_flow.
1489  * @param[out] error
1490  *   Pointer to error structure.
1491  *
1492  * @return
1493  *   0 on success, a negative errno value otherwise and rte_errno is set.
1494  */
1495 static int
1496 flow_dv_port_id_action_resource_register
1497                         (struct rte_eth_dev *dev,
1498                          struct mlx5_flow_dv_port_id_action_resource *resource,
1499                          struct mlx5_flow *dev_flow,
1500                          struct rte_flow_error *error)
1501 {
1502         struct mlx5_priv *priv = dev->data->dev_private;
1503         struct mlx5_ibv_shared *sh = priv->sh;
1504         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1505
1506         /* Lookup a matching resource from cache. */
1507         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1508                 if (resource->port_id == cache_resource->port_id) {
1509                         DRV_LOG(DEBUG, "port id action resource %p: "
1510                                 "refcnt %d++",
1511                                 (void *)cache_resource,
1512                                 rte_atomic32_read(&cache_resource->refcnt));
1513                         rte_atomic32_inc(&cache_resource->refcnt);
1514                         dev_flow->dv.port_id_action = cache_resource;
1515                         return 0;
1516                 }
1517         }
1518         /* Register new port id action resource. */
1519         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1520         if (!cache_resource)
1521                 return rte_flow_error_set(error, ENOMEM,
1522                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1523                                           "cannot allocate resource memory");
1524         *cache_resource = *resource;
1525         cache_resource->action =
1526                 mlx5_glue->dr_create_flow_action_dest_vport
1527                         (priv->sh->fdb_domain, resource->port_id);
1528         if (!cache_resource->action) {
1529                 rte_free(cache_resource);
1530                 return rte_flow_error_set(error, ENOMEM,
1531                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1532                                           NULL, "cannot create action");
1533         }
1534         rte_atomic32_init(&cache_resource->refcnt);
1535         rte_atomic32_inc(&cache_resource->refcnt);
1536         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1537         dev_flow->dv.port_id_action = cache_resource;
1538         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1539                 (void *)cache_resource,
1540                 rte_atomic32_read(&cache_resource->refcnt));
1541         return 0;
1542 }
1543
1544 /**
1545  * Find existing push vlan resource or create and register a new one.
1546  *
1547  * @param[in, out] dev
1548  *   Pointer to rte_eth_dev structure.
1549  * @param[in, out] resource
1550  *   Pointer to push VLAN action resource.
1551  * @param[in, out] dev_flow
1552  *   Pointer to the dev_flow.
1553  * @param[out] error
1554  *   Pointer to error structure.
1555  *
1556  * @return
1557  *   0 on success, a negative errno value otherwise and rte_errno is set.
1558  */
1559 static int
1560 flow_dv_push_vlan_action_resource_register
1561                        (struct rte_eth_dev *dev,
1562                         struct mlx5_flow_dv_push_vlan_action_resource *resource,
1563                         struct mlx5_flow *dev_flow,
1564                         struct rte_flow_error *error)
1565 {
1566         struct mlx5_priv *priv = dev->data->dev_private;
1567         struct mlx5_ibv_shared *sh = priv->sh;
1568         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource;
1569         struct mlx5dv_dr_domain *domain;
1570
1571         /* Lookup a matching resource from cache. */
1572         LIST_FOREACH(cache_resource, &sh->push_vlan_action_list, next) {
1573                 if (resource->vlan_tag == cache_resource->vlan_tag &&
1574                     resource->ft_type == cache_resource->ft_type) {
1575                         DRV_LOG(DEBUG, "push-VLAN action resource %p: "
1576                                 "refcnt %d++",
1577                                 (void *)cache_resource,
1578                                 rte_atomic32_read(&cache_resource->refcnt));
1579                         rte_atomic32_inc(&cache_resource->refcnt);
1580                         dev_flow->dv.push_vlan_res = cache_resource;
1581                         return 0;
1582                 }
1583         }
1584         /* Register new push_vlan action resource. */
1585         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1586         if (!cache_resource)
1587                 return rte_flow_error_set(error, ENOMEM,
1588                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1589                                           "cannot allocate resource memory");
1590         *cache_resource = *resource;
1591         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
1592                 domain = sh->fdb_domain;
1593         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
1594                 domain = sh->rx_domain;
1595         else
1596                 domain = sh->tx_domain;
1597         cache_resource->action =
1598                 mlx5_glue->dr_create_flow_action_push_vlan(domain,
1599                                                            resource->vlan_tag);
1600         if (!cache_resource->action) {
1601                 rte_free(cache_resource);
1602                 return rte_flow_error_set(error, ENOMEM,
1603                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1604                                           NULL, "cannot create action");
1605         }
1606         rte_atomic32_init(&cache_resource->refcnt);
1607         rte_atomic32_inc(&cache_resource->refcnt);
1608         LIST_INSERT_HEAD(&sh->push_vlan_action_list, cache_resource, next);
1609         dev_flow->dv.push_vlan_res = cache_resource;
1610         DRV_LOG(DEBUG, "new push vlan action resource %p: refcnt %d++",
1611                 (void *)cache_resource,
1612                 rte_atomic32_read(&cache_resource->refcnt));
1613         return 0;
1614 }

1615 /**
1616  * Get the size of a specific rte_flow_item_type.
1617  *
1618  * @param[in] item_type
1619  *   Tested rte_flow_item_type.
1620  *
1621  * @return
1622  *   Size of the item spec structure, 0 if void or irrelevant.
1623  */
1624 static size_t
1625 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1626 {
1627         size_t retval;
1628
1629         switch (item_type) {
1630         case RTE_FLOW_ITEM_TYPE_ETH:
1631                 retval = sizeof(struct rte_flow_item_eth);
1632                 break;
1633         case RTE_FLOW_ITEM_TYPE_VLAN:
1634                 retval = sizeof(struct rte_flow_item_vlan);
1635                 break;
1636         case RTE_FLOW_ITEM_TYPE_IPV4:
1637                 retval = sizeof(struct rte_flow_item_ipv4);
1638                 break;
1639         case RTE_FLOW_ITEM_TYPE_IPV6:
1640                 retval = sizeof(struct rte_flow_item_ipv6);
1641                 break;
1642         case RTE_FLOW_ITEM_TYPE_UDP:
1643                 retval = sizeof(struct rte_flow_item_udp);
1644                 break;
1645         case RTE_FLOW_ITEM_TYPE_TCP:
1646                 retval = sizeof(struct rte_flow_item_tcp);
1647                 break;
1648         case RTE_FLOW_ITEM_TYPE_VXLAN:
1649                 retval = sizeof(struct rte_flow_item_vxlan);
1650                 break;
1651         case RTE_FLOW_ITEM_TYPE_GRE:
1652                 retval = sizeof(struct rte_flow_item_gre);
1653                 break;
1654         case RTE_FLOW_ITEM_TYPE_NVGRE:
1655                 retval = sizeof(struct rte_flow_item_nvgre);
1656                 break;
1657         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1658                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1659                 break;
1660         case RTE_FLOW_ITEM_TYPE_MPLS:
1661                 retval = sizeof(struct rte_flow_item_mpls);
1662                 break;
1663         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1664         default:
1665                 retval = 0;
1666                 break;
1667         }
1668         return retval;
1669 }
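
/*
 * Illustrative size computation (values follow the rte_* item
 * definitions): an ETH/IPV4/UDP/VXLAN encap chain takes
 *
 *   14 + 20 + 8 + 8 = 50 bytes,
 *
 * comfortably below MLX5_ENCAP_MAX_LEN, so flow_dv_convert_encap_data()
 * below accepts it.
 */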
1670
1671 #define MLX5_ENCAP_IPV4_VERSION         0x40
1672 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1673 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1674 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1675 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1676 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1677 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
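
/*
 * Note on the defaults above: MLX5_ENCAP_IPV4_VERSION (0x40) is the IP
 * version in the high nibble of version_ihl and MLX5_ENCAP_IPV4_IHL_MIN
 * (0x05) the minimal five-word header length in the low nibble, so an
 * unset version_ihl becomes 0x45. Likewise MLX5_ENCAP_IPV6_VTC_FLOW puts
 * version 6 in the top nibble of vtc_flow, and MLX5_ENCAP_VXLAN_FLAGS is
 * the VXLAN "valid VNI" (I) flag in the first header word.
 */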
1678
1679 /**
1680  * Convert the encap action data from list of rte_flow_item to raw buffer
1681  *
1682  * @param[in] items
1683  *   Pointer to rte_flow_item objects list.
1684  * @param[out] buf
1685  *   Pointer to the output buffer.
1686  * @param[out] size
1687  *   Pointer to the output buffer size.
1688  * @param[out] error
1689  *   Pointer to the error structure.
1690  *
1691  * @return
1692  *   0 on success, a negative errno value otherwise and rte_errno is set.
1693  */
1694 static int
1695 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1696                            size_t *size, struct rte_flow_error *error)
1697 {
1698         struct rte_ether_hdr *eth = NULL;
1699         struct rte_vlan_hdr *vlan = NULL;
1700         struct rte_ipv4_hdr *ipv4 = NULL;
1701         struct rte_ipv6_hdr *ipv6 = NULL;
1702         struct rte_udp_hdr *udp = NULL;
1703         struct rte_vxlan_hdr *vxlan = NULL;
1704         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1705         struct rte_gre_hdr *gre = NULL;
1706         size_t len;
1707         size_t temp_size = 0;
1708
1709         if (!items)
1710                 return rte_flow_error_set(error, EINVAL,
1711                                           RTE_FLOW_ERROR_TYPE_ACTION,
1712                                           NULL, "invalid empty data");
1713         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1714                 len = flow_dv_get_item_len(items->type);
1715                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1716                         return rte_flow_error_set(error, EINVAL,
1717                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1718                                                   (void *)items->type,
1719                                                   "items total size is too big"
1720                                                   " for encap action");
1721                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1722                 switch (items->type) {
1723                 case RTE_FLOW_ITEM_TYPE_ETH:
1724                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1725                         break;
1726                 case RTE_FLOW_ITEM_TYPE_VLAN:
1727                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1728                         if (!eth)
1729                                 return rte_flow_error_set(error, EINVAL,
1730                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1731                                                 (void *)items->type,
1732                                                 "eth header not found");
1733                         if (!eth->ether_type)
1734                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1735                         break;
1736                 case RTE_FLOW_ITEM_TYPE_IPV4:
1737                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1738                         if (!vlan && !eth)
1739                                 return rte_flow_error_set(error, EINVAL,
1740                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1741                                                 (void *)items->type,
1742                                                 "neither eth nor vlan"
1743                                                 " header found");
1744                         if (vlan && !vlan->eth_proto)
1745                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1746                         else if (eth && !eth->ether_type)
1747                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1748                         if (!ipv4->version_ihl)
1749                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1750                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1751                         if (!ipv4->time_to_live)
1752                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1753                         break;
1754                 case RTE_FLOW_ITEM_TYPE_IPV6:
1755                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1756                         if (!vlan && !eth)
1757                                 return rte_flow_error_set(error, EINVAL,
1758                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1759                                                 (void *)items->type,
1760                                                 "neither eth nor vlan"
1761                                                 " header found");
1762                         if (vlan && !vlan->eth_proto)
1763                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1764                         else if (eth && !eth->ether_type)
1765                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1766                         if (!ipv6->vtc_flow)
1767                                 ipv6->vtc_flow =
1768                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1769                         if (!ipv6->hop_limits)
1770                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1771                         break;
1772                 case RTE_FLOW_ITEM_TYPE_UDP:
1773                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1774                         if (!ipv4 && !ipv6)
1775                                 return rte_flow_error_set(error, EINVAL,
1776                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1777                                                 (void *)items->type,
1778                                                 "ip header not found");
1779                         if (ipv4 && !ipv4->next_proto_id)
1780                                 ipv4->next_proto_id = IPPROTO_UDP;
1781                         else if (ipv6 && !ipv6->proto)
1782                                 ipv6->proto = IPPROTO_UDP;
1783                         break;
1784                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1785                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1786                         if (!udp)
1787                                 return rte_flow_error_set(error, EINVAL,
1788                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1789                                                 (void *)items->type,
1790                                                 "udp header not found");
1791                         if (!udp->dst_port)
1792                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1793                         if (!vxlan->vx_flags)
1794                                 vxlan->vx_flags =
1795                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1796                         break;
1797                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1798                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1799                         if (!udp)
1800                                 return rte_flow_error_set(error, EINVAL,
1801                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1802                                                 (void *)items->type,
1803                                                 "udp header not found");
1804                         if (!vxlan_gpe->proto)
1805                                 return rte_flow_error_set(error, EINVAL,
1806                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1807                                                 (void *)items->type,
1808                                                 "next protocol not found");
1809                         if (!udp->dst_port)
1810                                 udp->dst_port =
1811                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1812                         if (!vxlan_gpe->vx_flags)
1813                                 vxlan_gpe->vx_flags =
1814                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1815                         break;
1816                 case RTE_FLOW_ITEM_TYPE_GRE:
1817                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1818                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1819                         if (!gre->proto)
1820                                 return rte_flow_error_set(error, EINVAL,
1821                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1822                                                 (void *)items->type,
1823                                                 "next protocol not found");
1824                         if (!ipv4 && !ipv6)
1825                                 return rte_flow_error_set(error, EINVAL,
1826                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1827                                                 (void *)items->type,
1828                                                 "ip header not found");
1829                         if (ipv4 && !ipv4->next_proto_id)
1830                                 ipv4->next_proto_id = IPPROTO_GRE;
1831                         else if (ipv6 && !ipv6->proto)
1832                                 ipv6->proto = IPPROTO_GRE;
1833                         break;
1834                 case RTE_FLOW_ITEM_TYPE_VOID:
1835                         break;
1836                 default:
1837                         return rte_flow_error_set(error, EINVAL,
1838                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1839                                                   (void *)items->type,
1840                                                   "unsupported item type");
1841                         break;
1842                 }
1843                 temp_size += len;
1844         }
1845         *size = temp_size;
1846         return 0;
1847 }
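
/*
 * Caller sketch (hypothetical specs; error handling elided): only the
 * addresses and the VNI need to be supplied, the loop above fills
 * ether_type, next protocol, TTL, UDP destination port and VXLAN flags
 * with defaults when they are left zero.
 *
 *   const struct rte_flow_item items[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH,   .spec = &eth_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4,  .spec = &ipv4_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_UDP,   .spec = &udp_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan_spec },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   uint8_t buf[MLX5_ENCAP_MAX_LEN];
 *   size_t size;
 *
 *   if (flow_dv_convert_encap_data(items, buf, &size, error))
 *           return -rte_errno;
 */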
1848
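/**
 * Zero the UDP checksum of the tunnel header in a raw encap buffer.
 *
 * The device recomputes the IPv4 header checksum, and for IPv6 tunnels a
 * zero UDP checksum is acceptable, so any stale checksum copied from the
 * user-provided encap data is cleared here.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation data, starting at the L2 header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */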
1849 static int
1850 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1851 {
1852         struct rte_ether_hdr *eth = NULL;
1853         struct rte_vlan_hdr *vlan = NULL;
1854         struct rte_ipv6_hdr *ipv6 = NULL;
1855         struct rte_udp_hdr *udp = NULL;
1856         char *next_hdr;
1857         uint16_t proto;
1858
1859         eth = (struct rte_ether_hdr *)data;
1860         next_hdr = (char *)(eth + 1);
1861         proto = RTE_BE16(eth->ether_type);
1862
1863         /* VLAN skipping */
1864         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1865                 vlan = (struct rte_vlan_hdr *)next_hdr;
1866                 proto = RTE_BE16(vlan->eth_proto);
1867                 next_hdr += sizeof(struct rte_vlan_hdr);
1868         }
1869
1870         /* HW calculates the IPv4 checksum; no need to proceed. */
1871         if (proto == RTE_ETHER_TYPE_IPV4)
1872                 return 0;
1873
1874         /* Non-IPv4/IPv6 header, not supported. */
1875         if (proto != RTE_ETHER_TYPE_IPV6) {
1876                 return rte_flow_error_set(error, ENOTSUP,
1877                                           RTE_FLOW_ERROR_TYPE_ACTION,
1878                                           NULL, "Cannot offload non IPv4/IPv6");
1879         }
1880
1881         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1882
1883         /* Ignore non-UDP payload. */
1884         if (ipv6->proto != IPPROTO_UDP)
1885                 return 0;
1886
1887         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1888         udp->dgram_cksum = 0;
1889
1890         return 0;
1891 }
1892
1893 /**
1894  * Convert L2 encap action to DV specification.
1895  *
1896  * @param[in] dev
1897  *   Pointer to rte_eth_dev structure.
1898  * @param[in] action
1899  *   Pointer to action structure.
1900  * @param[in, out] dev_flow
1901  *   Pointer to the mlx5_flow.
1902  * @param[in] transfer
1903  *   Mark if the flow is E-Switch flow.
1904  * @param[out] error
1905  *   Pointer to the error structure.
1906  *
1907  * @return
1908  *   0 on success, a negative errno value otherwise and rte_errno is set.
1909  */
1910 static int
1911 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1912                                const struct rte_flow_action *action,
1913                                struct mlx5_flow *dev_flow,
1914                                uint8_t transfer,
1915                                struct rte_flow_error *error)
1916 {
1917         const struct rte_flow_item *encap_data;
1918         const struct rte_flow_action_raw_encap *raw_encap_data;
1919         struct mlx5_flow_dv_encap_decap_resource res = {
1920                 .reformat_type =
1921                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1922                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1923                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1924         };
1925
1926         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1927                 raw_encap_data =
1928                         (const struct rte_flow_action_raw_encap *)action->conf;
1929                 res.size = raw_encap_data->size;
1930                 memcpy(res.buf, raw_encap_data->data, res.size);
1931                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1932                         return -rte_errno;
1933         } else {
1934                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1935                         encap_data =
1936                                 ((const struct rte_flow_action_vxlan_encap *)
1937                                                 action->conf)->definition;
1938                 else
1939                         encap_data =
1940                                 ((const struct rte_flow_action_nvgre_encap *)
1941                                                 action->conf)->definition;
1942                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1943                                                &res.size, error))
1944                         return -rte_errno;
1945         }
1946         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1947                 return rte_flow_error_set(error, EINVAL,
1948                                           RTE_FLOW_ERROR_TYPE_ACTION,
1949                                           NULL, "can't create L2 encap action");
1950         return 0;
1951 }
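
/*
 * Caller sketch (illustrative; conf, items and attr are hypothetical):
 * the same helper serves VXLAN_ENCAP, NVGRE_ENCAP and the L2-to-L2
 * flavour of RAW_ENCAP.
 *
 *   struct rte_flow_action_vxlan_encap conf = { .definition = items };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP,
 *           .conf = &conf,
 *   };
 *
 *   if (flow_dv_create_action_l2_encap(dev, &action, dev_flow,
 *                                      attr->transfer, error))
 *           return -rte_errno;
 */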
1952
1953 /**
1954  * Convert L2 decap action to DV specification.
1955  *
1956  * @param[in] dev
1957  *   Pointer to rte_eth_dev structure.
1958  * @param[in, out] dev_flow
1959  *   Pointer to the mlx5_flow.
1960  * @param[in] transfer
1961  *   Mark if the flow is E-Switch flow.
1962  * @param[out] error
1963  *   Pointer to the error structure.
1964  *
1965  * @return
1966  *   0 on success, a negative errno value otherwise and rte_errno is set.
1967  */
1968 static int
1969 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1970                                struct mlx5_flow *dev_flow,
1971                                uint8_t transfer,
1972                                struct rte_flow_error *error)
1973 {
1974         struct mlx5_flow_dv_encap_decap_resource res = {
1975                 .size = 0,
1976                 .reformat_type =
1977                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1978                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1979                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1980         };
1981
1982         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1983                 return rte_flow_error_set(error, EINVAL,
1984                                           RTE_FLOW_ERROR_TYPE_ACTION,
1985                                           NULL, "can't create L2 decap action");
1986         return 0;
1987 }
1988
1989 /**
1990  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1991  *
1992  * @param[in] dev
1993  *   Pointer to rte_eth_dev structure.
1994  * @param[in] action
1995  *   Pointer to action structure.
1996  * @param[in, out] dev_flow
1997  *   Pointer to the mlx5_flow.
1998  * @param[in] attr
1999  *   Pointer to the flow attributes.
2000  * @param[out] error
2001  *   Pointer to the error structure.
2002  *
2003  * @return
2004  *   0 on success, a negative errno value otherwise and rte_errno is set.
2005  */
2006 static int
2007 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
2008                                 const struct rte_flow_action *action,
2009                                 struct mlx5_flow *dev_flow,
2010                                 const struct rte_flow_attr *attr,
2011                                 struct rte_flow_error *error)
2012 {
2013         const struct rte_flow_action_raw_encap *encap_data;
2014         struct mlx5_flow_dv_encap_decap_resource res;
2015
2016         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
2017         res.size = encap_data->size;
2018         memcpy(res.buf, encap_data->data, res.size);
2019         res.reformat_type = attr->egress ?
2020                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
2021                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
2022         if (attr->transfer)
2023                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2024         else
2025                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2026                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2027         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
2028                 return rte_flow_error_set(error, EINVAL,
2029                                           RTE_FLOW_ERROR_TYPE_ACTION,
2030                                           NULL, "can't create encap action");
2031         return 0;
2032 }
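
/*
 * Direction note (illustrative): for the raw L3-tunnel case the
 * reformat type follows the flow direction:
 *
 *   attr->egress  -> L2_TO_L3_TUNNEL (encap on transmit)
 *   attr->ingress -> L3_TUNNEL_TO_L2 (decap on receive)
 */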
2033
2034 /**
2035  * Create action push VLAN.
2036  *
2037  * @param[in] dev
2038  *   Pointer to rte_eth_dev structure.
2039  * @param[in] attr
2040  *   Pointer to the flow attributes.
2041  * @param[in] vlan
2042  *   Pointer to the VLAN header (TPID and TCI) to push.
2043  * @param[in, out] dev_flow
2044  *   Pointer to the mlx5_flow.
2045  * @param[out] error
2046  *   Pointer to the error structure.
2047  *
2048  * @return
2049  *   0 on success, a negative errno value otherwise and rte_errno is set.
2050  */
2051 static int
2052 flow_dv_create_action_push_vlan(struct rte_eth_dev *dev,
2053                                 const struct rte_flow_attr *attr,
2054                                 const struct rte_vlan_hdr *vlan,
2055                                 struct mlx5_flow *dev_flow,
2056                                 struct rte_flow_error *error)
2057 {
2058         struct mlx5_flow_dv_push_vlan_action_resource res;
2059
2060         res.vlan_tag =
2061                 rte_cpu_to_be_32(((uint32_t)vlan->eth_proto) << 16 |
2062                                  vlan->vlan_tci);
2063         if (attr->transfer)
2064                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
2065         else
2066                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
2067                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
2068         return flow_dv_push_vlan_action_resource_register
2069                                             (dev, &res, dev_flow, error);
2070 }
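
/*
 * Encoding example (hypothetical, host-order values): with eth_proto
 * 0x8100, PCP 3 and VID 100 the 32-bit tag built above is
 *
 *   rte_cpu_to_be_32(0x8100 << 16 | 3 << MLX5DV_FLOW_VLAN_PCP_SHIFT | 100)
 *
 * i.e. the bytes 0x81 0x00 0x60 0x64 on the wire.
 */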
2071
2072 /**
2073  * Validate the modify-header actions.
2074  *
2075  * @param[in] action_flags
2076  *   Holds the actions detected until now.
2077  * @param[in] action
2078  *   Pointer to the modify action.
2079  * @param[out] error
2080  *   Pointer to error structure.
2081  *
2082  * @return
2083  *   0 on success, a negative errno value otherwise and rte_errno is set.
2084  */
2085 static int
2086 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
2087                                    const struct rte_flow_action *action,
2088                                    struct rte_flow_error *error)
2089 {
2090         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
2091                 return rte_flow_error_set(error, EINVAL,
2092                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2093                                           NULL, "action configuration not set");
2094         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
2095                 return rte_flow_error_set(error, EINVAL,
2096                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2097                                           "can't have encap action before"
2098                                           " modify action");
2099         return 0;
2100 }
2101
2102 /**
2103  * Validate the modify-header MAC address actions.
2104  *
2105  * @param[in] action_flags
2106  *   Holds the actions detected until now.
2107  * @param[in] action
2108  *   Pointer to the modify action.
2109  * @param[in] item_flags
2110  *   Holds the items detected.
2111  * @param[out] error
2112  *   Pointer to error structure.
2113  *
2114  * @return
2115  *   0 on success, a negative errno value otherwise and rte_errno is set.
2116  */
2117 static int
2118 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
2119                                    const struct rte_flow_action *action,
2120                                    const uint64_t item_flags,
2121                                    struct rte_flow_error *error)
2122 {
2123         int ret = 0;
2124
2125         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2126         if (!ret) {
2127                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
2128                         return rte_flow_error_set(error, EINVAL,
2129                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2130                                                   NULL,
2131                                                   "no L2 item in pattern");
2132         }
2133         return ret;
2134 }
2135
2136 /**
2137  * Validate the modify-header IPv4 address actions.
2138  *
2139  * @param[in] action_flags
2140  *   Holds the actions detected until now.
2141  * @param[in] action
2142  *   Pointer to the modify action.
2143  * @param[in] item_flags
2144  *   Holds the items detected.
2145  * @param[out] error
2146  *   Pointer to error structure.
2147  *
2148  * @return
2149  *   0 on success, a negative errno value otherwise and rte_errno is set.
2150  */
2151 static int
2152 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
2153                                     const struct rte_flow_action *action,
2154                                     const uint64_t item_flags,
2155                                     struct rte_flow_error *error)
2156 {
2157         int ret = 0;
2158
2159         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2160         if (!ret) {
2161                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
2162                         return rte_flow_error_set(error, EINVAL,
2163                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2164                                                   NULL,
2165                                                   "no ipv4 item in pattern");
2166         }
2167         return ret;
2168 }
2169
2170 /**
2171  * Validate the modify-header IPv6 address actions.
2172  *
2173  * @param[in] action_flags
2174  *   Holds the actions detected until now.
2175  * @param[in] action
2176  *   Pointer to the modify action.
2177  * @param[in] item_flags
2178  *   Holds the items detected.
2179  * @param[out] error
2180  *   Pointer to error structure.
2181  *
2182  * @return
2183  *   0 on success, a negative errno value otherwise and rte_errno is set.
2184  */
2185 static int
2186 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
2187                                     const struct rte_flow_action *action,
2188                                     const uint64_t item_flags,
2189                                     struct rte_flow_error *error)
2190 {
2191         int ret = 0;
2192
2193         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2194         if (!ret) {
2195                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
2196                         return rte_flow_error_set(error, EINVAL,
2197                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2198                                                   NULL,
2199                                                   "no ipv6 item in pattern");
2200         }
2201         return ret;
2202 }
2203
2204 /**
2205  * Validate the modify-header TP actions.
2206  *
2207  * @param[in] action_flags
2208  *   Holds the actions detected until now.
2209  * @param[in] action
2210  *   Pointer to the modify action.
2211  * @param[in] item_flags
2212  *   Holds the items detected.
2213  * @param[out] error
2214  *   Pointer to error structure.
2215  *
2216  * @return
2217  *   0 on success, a negative errno value otherwise and rte_errno is set.
2218  */
2219 static int
2220 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
2221                                   const struct rte_flow_action *action,
2222                                   const uint64_t item_flags,
2223                                   struct rte_flow_error *error)
2224 {
2225         int ret = 0;
2226
2227         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2228         if (!ret) {
2229                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
2230                         return rte_flow_error_set(error, EINVAL,
2231                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2232                                                   NULL, "no transport layer "
2233                                                   "in pattern");
2234         }
2235         return ret;
2236 }
2237
2238 /**
2239  * Validate the modify-header actions of increment/decrement
2240  * TCP Sequence-number.
2241  *
2242  * @param[in] action_flags
2243  *   Holds the actions detected until now.
2244  * @param[in] action
2245  *   Pointer to the modify action.
2246  * @param[in] item_flags
2247  *   Holds the items detected.
2248  * @param[out] error
2249  *   Pointer to error structure.
2250  *
2251  * @return
2252  *   0 on success, a negative errno value otherwise and rte_errno is set.
2253  */
2254 static int
2255 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
2256                                        const struct rte_flow_action *action,
2257                                        const uint64_t item_flags,
2258                                        struct rte_flow_error *error)
2259 {
2260         int ret = 0;
2261
2262         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2263         if (!ret) {
2264                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2265                         return rte_flow_error_set(error, EINVAL,
2266                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2267                                                   NULL, "no TCP item in"
2268                                                   " pattern");
2269                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
2270                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
2271                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
2272                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
2273                         return rte_flow_error_set(error, EINVAL,
2274                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2275                                                   NULL,
2276                                                   "cannot decrease and increase"
2277                                                   " TCP sequence number"
2278                                                   " at the same time");
2279         }
2280         return ret;
2281 }
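
/*
 * Illustrative: an action list that both increments and decrements the
 * TCP sequence number, e.g.
 *
 *   INC_TCP_SEQ -> DEC_TCP_SEQ -> END
 *
 * is rejected with EINVAL by the check above; the same rule applies to
 * the acknowledgment number variant below.
 */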
2282
2283 /**
2284  * Validate the modify-header actions of increment/decrement
2285  * TCP Acknowledgment number.
2286  *
2287  * @param[in] action_flags
2288  *   Holds the actions detected until now.
2289  * @param[in] action
2290  *   Pointer to the modify action.
2291  * @param[in] item_flags
2292  *   Holds the items detected.
2293  * @param[out] error
2294  *   Pointer to error structure.
2295  *
2296  * @return
2297  *   0 on success, a negative errno value otherwise and rte_errno is set.
2298  */
2299 static int
2300 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
2301                                        const struct rte_flow_action *action,
2302                                        const uint64_t item_flags,
2303                                        struct rte_flow_error *error)
2304 {
2305         int ret = 0;
2306
2307         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2308         if (!ret) {
2309                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
2310                         return rte_flow_error_set(error, EINVAL,
2311                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2312                                                   NULL, "no TCP item in"
2313                                                   " pattern");
2314                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
2315                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
2316                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
2317                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
2318                         return rte_flow_error_set(error, EINVAL,
2319                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2320                                                   NULL,
2321                                                   "cannot decrease and increase"
2322                                                   " TCP acknowledgment number"
2323                                                   " at the same time");
2324         }
2325         return ret;
2326 }
2327
2328 /**
2329  * Validate the modify-header TTL actions.
2330  *
2331  * @param[in] action_flags
2332  *   Holds the actions detected until now.
2333  * @param[in] action
2334  *   Pointer to the modify action.
2335  * @param[in] item_flags
2336  *   Holds the items detected.
2337  * @param[out] error
2338  *   Pointer to error structure.
2339  *
2340  * @return
2341  *   0 on success, a negative errno value otherwise and rte_errno is set.
2342  */
2343 static int
2344 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
2345                                    const struct rte_flow_action *action,
2346                                    const uint64_t item_flags,
2347                                    struct rte_flow_error *error)
2348 {
2349         int ret = 0;
2350
2351         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
2352         if (!ret) {
2353                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
2354                         return rte_flow_error_set(error, EINVAL,
2355                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2356                                                   NULL,
2357                                                   "no IP protocol in pattern");
2358         }
2359         return ret;
2360 }
2361
2362 /**
2363  * Validate jump action.
2364  *
2365  * @param[in] action
2366  *   Pointer to the jump action.
2367  * @param[in] action_flags
2368  *   Holds the actions detected until now.
2369  * @param[in] attributes
2370  *   Pointer to flow attributes.
2371  * @param[in] external
2372  *   Action belongs to flow rule created by request external to PMD.
2373  * @param[out] error
2374  *   Pointer to error structure.
2375  *
2376  * @return
2377  *   0 on success, a negative errno value otherwise and rte_errno is set.
2378  */
2379 static int
2380 flow_dv_validate_action_jump(const struct rte_flow_action *action,
2381                              uint64_t action_flags,
2382                              const struct rte_flow_attr *attributes,
2383                              bool external, struct rte_flow_error *error)
2384 {
2385         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
2386                                                     MLX5_MAX_TABLES;
2387         uint32_t target_group, table;
2388         int ret = 0;
2389
2390         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2391                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2392                 return rte_flow_error_set(error, EINVAL,
2393                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2394                                           "can't have 2 fate actions in"
2395                                           " same flow");
2396         if (!action->conf)
2397                 return rte_flow_error_set(error, EINVAL,
2398                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2399                                           NULL, "action configuration not set");
2400         target_group =
2401                 ((const struct rte_flow_action_jump *)action->conf)->group;
2402         ret = mlx5_flow_group_to_table(attributes, external, target_group,
2403                                        &table, error);
2404         if (ret)
2405                 return ret;
2406         if (table >= max_group)
2407                 return rte_flow_error_set(error, EINVAL,
2408                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
2409                                           "target group index out of range");
2410         if (attributes->group >= target_group)
2411                 return rte_flow_error_set(error, EINVAL,
2412                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2413                                           "target group must be higher than"
2414                                           " the current flow group");
2415         return 0;
2416 }
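
/*
 * Illustrative (hypothetical group numbers): for a rule created in
 * group 0,
 *
 *   struct rte_flow_action_jump jump = { .group = 1 };
 *
 * passes validation, while .group = 0 fails because the target group
 * must be strictly higher than the group of the flow itself.
 */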
2417
2418 /**
2419  * Validate the port_id action.
2420  *
2421  * @param[in] dev
2422  *   Pointer to rte_eth_dev structure.
2423  * @param[in] action_flags
2424  *   Bit-fields that hold the actions detected until now.
2425  * @param[in] action
2426  *   Port_id RTE action structure.
2427  * @param[in] attr
2428  *   Attributes of the flow that includes this action.
2429  * @param[out] error
2430  *   Pointer to error structure.
2431  *
2432  * @return
2433  *   0 on success, a negative errno value otherwise and rte_errno is set.
2434  */
2435 static int
2436 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2437                                 uint64_t action_flags,
2438                                 const struct rte_flow_action *action,
2439                                 const struct rte_flow_attr *attr,
2440                                 struct rte_flow_error *error)
2441 {
2442         const struct rte_flow_action_port_id *port_id;
2443         uint16_t port;
2444         uint16_t esw_domain_id;
2445         uint16_t act_port_domain_id;
2446         int ret;
2447
2448         if (!attr->transfer)
2449                 return rte_flow_error_set(error, ENOTSUP,
2450                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2451                                           NULL,
2452                                           "port id action is valid in transfer"
2453                                           " mode only");
2454         if (!action || !action->conf)
2455                 return rte_flow_error_set(error, ENOTSUP,
2456                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2457                                           NULL,
2458                                           "port id action parameters must be"
2459                                           " specified");
2460         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2461                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2462                 return rte_flow_error_set(error, EINVAL,
2463                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2464                                           "can have only one fate action in"
2465                                           " a flow");
2466         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2467                                         &esw_domain_id, NULL);
2468         if (ret < 0)
2469                 return rte_flow_error_set(error, -ret,
2470                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2471                                           NULL,
2472                                           "failed to obtain E-Switch info");
2473         port_id = action->conf;
2474         port = port_id->original ? dev->data->port_id : port_id->id;
2475         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2476         if (ret)
2477                 return rte_flow_error_set
2478                                 (error, -ret,
2479                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2480                                  "failed to obtain E-Switch port id for port");
2481         if (act_port_domain_id != esw_domain_id)
2482                 return rte_flow_error_set
2483                                 (error, EINVAL,
2484                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2485                                  "port does not belong to"
2486                                  " E-Switch being configured");
2487         return 0;
2488 }
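
/*
 * Illustrative (hypothetical port number): the action is accepted only
 * for transfer flows whose target port shares the E-Switch domain of
 * the device, e.g.:
 *
 *   struct rte_flow_attr attr = { .transfer = 1 };
 *   struct rte_flow_action_port_id conf = { .id = 1 };
 *   struct rte_flow_action action = {
 *           .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
 *           .conf = &conf,
 *   };
 */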
2489
2490 /**
2491  * Find existing modify-header resource or create and register a new one.
2492  *
2493  * @param[in, out] dev
2494  *   Pointer to rte_eth_dev structure.
2495  * @param[in, out] resource
2496  *   Pointer to modify-header resource.
2497  * @param[in, out] dev_flow
2498  *   Pointer to the dev_flow.
2499  * @param[out] error
2500  *   Pointer to error structure.
2501  *
2502  * @return
2503  *   0 on success, a negative errno value otherwise and rte_errno is set.
2504  */
2505 static int
2506 flow_dv_modify_hdr_resource_register
2507                         (struct rte_eth_dev *dev,
2508                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2509                          struct mlx5_flow *dev_flow,
2510                          struct rte_flow_error *error)
2511 {
2512         struct mlx5_priv *priv = dev->data->dev_private;
2513         struct mlx5_ibv_shared *sh = priv->sh;
2514         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2515         struct mlx5dv_dr_domain *ns;
2516
2517         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2518                 ns = sh->fdb_domain;
2519         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2520                 ns = sh->tx_domain;
2521         else
2522                 ns = sh->rx_domain;
2523         resource->flags =
2524                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2525         /* Lookup a matching resource from cache. */
2526         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2527                 if (resource->ft_type == cache_resource->ft_type &&
2528                     resource->actions_num == cache_resource->actions_num &&
2529                     resource->flags == cache_resource->flags &&
2530                     !memcmp((const void *)resource->actions,
2531                             (const void *)cache_resource->actions,
2532                             (resource->actions_num *
2533                                             sizeof(resource->actions[0])))) {
2534                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2535                                 (void *)cache_resource,
2536                                 rte_atomic32_read(&cache_resource->refcnt));
2537                         rte_atomic32_inc(&cache_resource->refcnt);
2538                         dev_flow->dv.modify_hdr = cache_resource;
2539                         return 0;
2540                 }
2541         }
2542         /* Register new modify-header resource. */
2543         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2544         if (!cache_resource)
2545                 return rte_flow_error_set(error, ENOMEM,
2546                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2547                                           "cannot allocate resource memory");
2548         *cache_resource = *resource;
2549         cache_resource->verbs_action =
2550                 mlx5_glue->dv_create_flow_action_modify_header
2551                                         (sh->ctx, cache_resource->ft_type,
2552                                          ns, cache_resource->flags,
2553                                          cache_resource->actions_num *
2554                                          sizeof(cache_resource->actions[0]),
2555                                          (uint64_t *)cache_resource->actions);
2556         if (!cache_resource->verbs_action) {
2557                 rte_free(cache_resource);
2558                 return rte_flow_error_set(error, ENOMEM,
2559                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2560                                           NULL, "cannot create action");
2561         }
2562         rte_atomic32_init(&cache_resource->refcnt);
2563         rte_atomic32_inc(&cache_resource->refcnt);
2564         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2565         dev_flow->dv.modify_hdr = cache_resource;
2566         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2567                 (void *)cache_resource,
2568                 rte_atomic32_read(&cache_resource->refcnt));
2569         return 0;
2570 }
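/*
 * Usage sketch for the helper above (illustrative only; the table type
 * and the way the modify command is filled are assumptions, not the
 * only possible call sequence):
 *
 *	struct mlx5_flow_dv_modify_hdr_resource res = {
 *		.ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
 *		.actions_num = 1,
 *	};
 *
 *	// res.actions[0] is assumed to be filled beforehand by one of
 *	// the modify-header convert helpers.
 *	if (flow_dv_modify_hdr_resource_register(dev, &res, dev_flow,
 *						 error))
 *		return -rte_errno;
 *	// dev_flow->dv.modify_hdr now points to a cached, reference
 *	// counted action that identical flows will share.
 */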
2571
2572 #define MLX5_CNT_CONTAINER_RESIZE 64
2573
2574 /**
2575  * Get or create a flow counter.
2576  *
2577  * @param[in] dev
2578  *   Pointer to the Ethernet device structure.
2579  * @param[in] shared
2580  *   Indicate if this counter is shared with other flows.
2581  * @param[in] id
2582  *   Counter identifier.
2583  *
2584  * @return
2585  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2586  */
2587 static struct mlx5_flow_counter *
2588 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2589                                uint32_t id)
2590 {
2591         struct mlx5_priv *priv = dev->data->dev_private;
2592         struct mlx5_flow_counter *cnt = NULL;
2593         struct mlx5_devx_obj *dcs = NULL;
2594
2595         if (!priv->config.devx) {
2596                 rte_errno = ENOTSUP;
2597                 return NULL;
2598         }
2599         if (shared) {
2600                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2601                         if (cnt->shared && cnt->id == id) {
2602                                 cnt->ref_cnt++;
2603                                 return cnt;
2604                         }
2605                 }
2606         }
2607         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2608         if (!dcs)
2609                 return NULL;
2610         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2611         if (!cnt) {
2612                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2613                 rte_errno = ENOMEM;
2614                 return NULL;
2615         }
2616         struct mlx5_flow_counter tmpl = {
2617                 .shared = shared,
2618                 .ref_cnt = 1,
2619                 .id = id,
2620                 .dcs = dcs,
2621         };
2622         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2623         if (!tmpl.action) {
2624                 rte_errno = errno;
2625                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2626                 rte_free(cnt);
2627                 return NULL;
2628         }
2629         *cnt = tmpl;
2630         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2631         return cnt;
2632 }
2633
2634 /**
2635  * Release a flow counter.
2636  *
2637  * @param[in] dev
2638  *   Pointer to the Ethernet device structure.
2639  * @param[in] counter
2640  *   Pointer to the counter handler.
2641  */
2642 static void
2643 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2644                                  struct mlx5_flow_counter *counter)
2645 {
2646         struct mlx5_priv *priv = dev->data->dev_private;
2647
2648         if (!counter)
2649                 return;
2650         if (--counter->ref_cnt == 0) {
2651                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2652                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2653                 rte_free(counter);
2654         }
2655 }
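/*
 * Fallback-mode lifecycle sketch (illustrative): when
 * priv->counter_fallback is set every counter is backed by its own
 * devx object and queried synchronously, so the pool machinery below
 * is bypassed entirely:
 *
 *	struct mlx5_flow_counter *cnt;
 *
 *	// shared = 0; the id is only used to match shared counters.
 *	cnt = flow_dv_counter_alloc_fallback(dev, 0, 0);
 *	if (!cnt)
 *		return -rte_errno;
 *	// ... attach cnt->action to a flow and use it ...
 *	flow_dv_counter_release_fallback(dev, cnt);
 */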
2656
2657 /**
2658  * Query a devx flow counter.
2659  *
2660  * @param[in] dev
2661  *   Pointer to the Ethernet device structure.
2662  * @param[in] cnt
2663  *   Pointer to the flow counter.
2664  * @param[out] pkts
2665  *   The statistics value of packets.
2666  * @param[out] bytes
2667  *   The statistics value of bytes.
2668  *
2669  * @return
2670  *   0 on success, otherwise a negative errno value and rte_errno is set.
2671  */
2672 static inline int
2673 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2674                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2675                      uint64_t *bytes)
2676 {
2677         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2678                                                 0, NULL, NULL, 0);
2679 }
2680
2681 /**
2682  * Get a pool by a counter.
2683  *
2684  * @param[in] cnt
2685  *   Pointer to the counter.
2686  *
2687  * @return
2688  *   The counter pool.
2689  */
2690 static struct mlx5_flow_counter_pool *
2691 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2692 {
2693         if (!cnt->batch) {
2694                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2695                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2696         }
2697         return cnt->pool;
2698 }
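/*
 * Worked example for the non-batch branch above, assuming
 * MLX5_COUNTERS_PER_POOL is 512: a single counter with
 * cnt->dcs->id == 1027 was placed at counters_raw[1027 % 512], i.e.
 * counters_raw[3], by flow_dv_counter_pool_prepare(), so stepping the
 * pointer back by 3 entries lands on counters_raw[0] and the pool
 * header is the struct mlx5_flow_counter_pool laid out immediately
 * before that array (see the size computation in
 * flow_dv_pool_create()).
 */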
2699
2700 /**
2701  * Get a pool by devx counter ID.
2702  *
2703  * @param[in] cont
2704  *   Pointer to the counter container.
2705  * @param[in] id
2706  *   The counter devx ID.
2707  *
2708  * @return
2709  *   The counter pool pointer if it exists, NULL otherwise.
2710  */
2711 static struct mlx5_flow_counter_pool *
2712 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2713 {
2714         struct mlx5_flow_counter_pool *pool;
2715
2716         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2717                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2718                                 MLX5_COUNTERS_PER_POOL;
2719
2720                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2721                         return pool;
2722         }
2723         return NULL;
2724 }
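/*
 * Example of the range check above, again assuming
 * MLX5_COUNTERS_PER_POOL is 512: a pool whose min_dcs->id is 1027 has
 * base (1027 / 512) * 512 = 1024, so all devx IDs in [1024, 1536)
 * resolve to that pool.
 */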
2725
2726 /**
2727  * Allocate memory for the counter values, wrapped by all the needed
2728  * management structures.
2729  *
2730  * @param[in] dev
2731  *   Pointer to the Ethernet device structure.
2732  * @param[in] raws_n
2733  *   Number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
2734  *
2735  * @return
2736  *   The new memory management pointer on success, otherwise NULL and rte_errno
2737  *   is set.
2738  */
2739 static struct mlx5_counter_stats_mem_mng *
2740 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2741 {
2742         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2743                                         (dev->data->dev_private))->sh;
2744         struct mlx5_devx_mkey_attr mkey_attr;
2745         struct mlx5_counter_stats_mem_mng *mem_mng;
2746         volatile struct flow_counter_stats *raw_data;
2747         int size = (sizeof(struct flow_counter_stats) *
2748                         MLX5_COUNTERS_PER_POOL +
2749                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2750                         sizeof(struct mlx5_counter_stats_mem_mng);
2751         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2752         int i;
2753
2754         if (!mem) {
2755                 rte_errno = ENOMEM;
2756                 return NULL;
2757         }
2758         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2759         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2760         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2761                                                  IBV_ACCESS_LOCAL_WRITE);
2762         if (!mem_mng->umem) {
2763                 rte_errno = errno;
2764                 rte_free(mem);
2765                 return NULL;
2766         }
2767         mkey_attr.addr = (uintptr_t)mem;
2768         mkey_attr.size = size;
2769         mkey_attr.umem_id = mem_mng->umem->umem_id;
2770         mkey_attr.pd = sh->pdn;
2771         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2772         if (!mem_mng->dm) {
2773                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2774                 rte_errno = errno;
2775                 rte_free(mem);
2776                 return NULL;
2777         }
2778         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2779         raw_data = (volatile struct flow_counter_stats *)mem;
2780         for (i = 0; i < raws_n; ++i) {
2781                 mem_mng->raws[i].mem_mng = mem_mng;
2782                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2783         }
2784         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2785         return mem_mng;
2786 }
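/*
 * Layout of the single page-aligned allocation made above:
 *
 *	mem ->	+------------------------------------------------+
 *		| raws_n * MLX5_COUNTERS_PER_POOL counter stats  |
 *		| (the only part registered as umem/mkey for the |
 *		| device to DMA into)                            |
 *		+------------------------------------------------+
 *		| raws_n * struct mlx5_counter_stats_raw headers |
 *		| == mem_mng->raws[0..raws_n - 1]                |
 *		+------------------------------------------------+
 *		| struct mlx5_counter_stats_mem_mng == mem_mng   |
 *		+------------------------------------------------+
 *
 * Each raws[i].data points at its own slice of the DMA area, so the
 * host thread reads device-written statistics without extra copies.
 */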
2787
2788 /**
2789  * Resize a counter container.
2790  *
2791  * @param[in] dev
2792  *   Pointer to the Ethernet device structure.
2793  * @param[in] batch
2794  *   Whether the pool is for counter that was allocated by batch command.
2795  *
2796  * @return
2797  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2798  */
2799 static struct mlx5_pools_container *
2800 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2801 {
2802         struct mlx5_priv *priv = dev->data->dev_private;
2803         struct mlx5_pools_container *cont =
2804                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2805         struct mlx5_pools_container *new_cont =
2806                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2807         struct mlx5_counter_stats_mem_mng *mem_mng;
2808         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2809         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2810         int i;
2811
2812         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2813                 /* The last resize hasn't been detected by the host thread yet. */
2814                 rte_errno = EAGAIN;
2815                 return NULL;
2816         }
2817         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2818         if (!new_cont->pools) {
2819                 rte_errno = ENOMEM;
2820                 return NULL;
2821         }
2822         if (cont->n)
2823                 memcpy(new_cont->pools, cont->pools, cont->n *
2824                        sizeof(struct mlx5_flow_counter_pool *));
2825         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2826                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2827         if (!mem_mng) {
2828                 rte_free(new_cont->pools);
2829                 return NULL;
2830         }
2831         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2832                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2833                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2834                                  i, next);
2835         new_cont->n = resize;
2836         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2837         TAILQ_INIT(&new_cont->pool_list);
2838         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2839         new_cont->init_mem_mng = mem_mng;
2840         rte_cio_wmb();
2841         /* Flip the master container. */
2842         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2843         return new_cont;
2844 }
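/*
 * Note on the flip above: two containers are kept per batch flavor and
 * the mhi[batch] bit selects the master one. The resized copy is fully
 * built in the spare container first, rte_cio_wmb() orders those
 * writes, and toggling the bit then publishes the new container in a
 * single step, so the asynchronous query host thread never observes a
 * half-initialized pools array. The EAGAIN path above covers the case
 * where the host thread has not yet moved off the copy retired by the
 * previous flip, so the spare cannot be reused yet.
 */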
2845
2846 /**
2847  * Query a devx flow counter.
2848  *
2849  * @param[in] dev
2850  *   Pointer to the Ethernet device structure.
2851  * @param[in] cnt
2852  *   Pointer to the flow counter.
2853  * @param[out] pkts
2854  *   The statistics value of packets.
2855  * @param[out] bytes
2856  *   The statistics value of bytes.
2857  *
2858  * @return
2859  *   0 on success, otherwise a negative errno value and rte_errno is set.
2860  */
2861 static inline int
2862 _flow_dv_query_count(struct rte_eth_dev *dev,
2863                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2864                      uint64_t *bytes)
2865 {
2866         struct mlx5_priv *priv = dev->data->dev_private;
2867         struct mlx5_flow_counter_pool *pool =
2868                         flow_dv_counter_pool_get(cnt);
2869         int offset = cnt - &pool->counters_raw[0];
2870
2871         if (priv->counter_fallback)
2872                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2873
2874         rte_spinlock_lock(&pool->sl);
2875         /*
2876          * A single-counter allocation may return an ID smaller than those
2877          * already allocated, in parallel with the host thread reading.
2878          * In this case the new counter values must be reported as 0.
2879          */
2880         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2881                 *pkts = 0;
2882                 *bytes = 0;
2883         } else {
2884                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2885                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2886         }
2887         rte_spinlock_unlock(&pool->sl);
2888         return 0;
2889 }
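/*
 * Usage sketch (illustrative), e.g. for serving an
 * RTE_FLOW_ACTION_TYPE_COUNT query; "cnt" is assumed to come from
 * flow_dv_counter_alloc():
 *
 *	uint64_t pkts, bytes;
 *
 *	if (_flow_dv_query_count(dev, cnt, &pkts, &bytes))
 *		return -rte_errno;
 *	// The values are absolute; subtracting the cnt->hits/cnt->bytes
 *	// reset values saved at allocation time gives per-flow deltas.
 */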
2890
2891 /**
2892  * Create and initialize a new counter pool.
2893  *
2894  * @param[in] dev
2895  *   Pointer to the Ethernet device structure.
2896  * @param[out] dcs
2897  *   The devX counter handle.
2898  * @param[in] batch
2899  *   Whether the pool is for counter that was allocated by batch command.
2900  *
2901  * @return
2902  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
2903  */
2904 static struct mlx5_flow_counter_pool *
2905 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2906                     uint32_t batch)
2907 {
2908         struct mlx5_priv *priv = dev->data->dev_private;
2909         struct mlx5_flow_counter_pool *pool;
2910         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2911                                                                0);
2912         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
2913         uint32_t size;
2914
2915         if (cont->n == n_valid) {
2916                 cont = flow_dv_container_resize(dev, batch);
2917                 if (!cont)
2918                         return NULL;
2919         }
2920         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
2921                         sizeof(struct mlx5_flow_counter);
2922         pool = rte_calloc(__func__, 1, size, 0);
2923         if (!pool) {
2924                 rte_errno = ENOMEM;
2925                 return NULL;
2926         }
2927         pool->min_dcs = dcs;
2928         pool->raw = cont->init_mem_mng->raws + n_valid %
2929                                                      MLX5_CNT_CONTAINER_RESIZE;
2930         pool->raw_hw = NULL;
2931         rte_spinlock_init(&pool->sl);
2932         /*
2933          * The newly allocated counters in this pool have query_gen 0; a pool
2934          * query_gen of 2 makes them all valid (query_gen + 1 < pool gen).
2935          */
2936         rte_atomic64_set(&pool->query_gen, 0x2);
2937         TAILQ_INIT(&pool->counters);
2938         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2939         cont->pools[n_valid] = pool;
2940         /* Pool initialization must be updated before host thread access. */
2941         rte_cio_wmb();
2942         rte_atomic16_add(&cont->n_valid, 1);
2943         return pool;
2944 }
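/*
 * Raw slot selection above: every mem_mng created at resize time
 * carries MLX5_CNT_CONTAINER_RESIZE (64) pool-sized raw areas plus
 * MLX5_MAX_PENDING_QUERIES spares, so pool number n_valid reads its
 * statistics from slot n_valid % 64 of the container's current
 * init_mem_mng.
 */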
2945
2946 /**
2947  * Prepare a new counter and/or a new counter pool.
2948  *
2949  * @param[in] dev
2950  *   Pointer to the Ethernet device structure.
2951  * @param[out] cnt_free
2952  *   Where to put the pointer of a new counter.
2953  * @param[in] batch
2954  *   Whether the pool is for counter that was allocated by batch command.
2955  *
2956  * @return
2957  *   The free counter pool pointer and @p cnt_free is set on success,
2958  *   NULL otherwise and rte_errno is set.
2959  */
2960 static struct mlx5_flow_counter_pool *
2961 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
2962                              struct mlx5_flow_counter **cnt_free,
2963                              uint32_t batch)
2964 {
2965         struct mlx5_priv *priv = dev->data->dev_private;
2966         struct mlx5_flow_counter_pool *pool;
2967         struct mlx5_devx_obj *dcs = NULL;
2968         struct mlx5_flow_counter *cnt;
2969         uint32_t i;
2970
2971         if (!batch) {
2972                 /* bulk_bitmap must be 0 for single counter allocation. */
2973                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2974                 if (!dcs)
2975                         return NULL;
2976                 pool = flow_dv_find_pool_by_id
2977                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
2978                 if (!pool) {
2979                         pool = flow_dv_pool_create(dev, dcs, batch);
2980                         if (!pool) {
2981                                 mlx5_devx_cmd_destroy(dcs);
2982                                 return NULL;
2983                         }
2984                 } else if (dcs->id < pool->min_dcs->id) {
2985                         rte_atomic64_set(&pool->a64_dcs,
2986                                          (int64_t)(uintptr_t)dcs);
2987                 }
2988                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
2989                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2990                 cnt->dcs = dcs;
2991                 *cnt_free = cnt;
2992                 return pool;
2993         }
2994         /* bulk_bitmap is in units of 128 counters. */
2995         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
2996                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
2997         if (!dcs) {
2998                 rte_errno = ENODATA;
2999                 return NULL;
3000         }
3001         pool = flow_dv_pool_create(dev, dcs, batch);
3002         if (!pool) {
3003                 mlx5_devx_cmd_destroy(dcs);
3004                 return NULL;
3005         }
3006         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3007                 cnt = &pool->counters_raw[i];
3008                 cnt->pool = pool;
3009                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
3010         }
3011         *cnt_free = &pool->counters_raw[0];
3012         return pool;
3013 }
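/*
 * Note on the batch path above: bulk_bitmap value 0x4 requests a bulk
 * of 4 * 128 = 512 counters from a single devx object, which is what
 * lets the loop above carve a whole pool out of it (so
 * MLX5_COUNTERS_PER_POOL is expected to be 512 here); counter i is
 * later addressed as (pool->min_dcs, offset i) when its DV action is
 * created in flow_dv_counter_alloc().
 */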
3014
3015 /**
3016  * Search for an existing shared counter.
3017  *
3018  * @param[in] cont
3019  *   Pointer to the relevant counter pool container.
3020  * @param[in] id
3021  *   The shared counter ID to search.
3022  *
3023  * @return
3024  *   NULL if it does not exist, otherwise a pointer to the shared counter.
3025  */
3026 static struct mlx5_flow_counter *
3027 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
3028                               uint32_t id)
3029 {
3030         struct mlx5_flow_counter *cnt;
3031         struct mlx5_flow_counter_pool *pool;
3032         int i;
3033
3034         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3035                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
3036                         cnt = &pool->counters_raw[i];
3037                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
3038                                 return cnt;
3039                 }
3040         }
3041         return NULL;
3042 }
3043
3044 /**
3045  * Allocate a flow counter.
3046  *
3047  * @param[in] dev
3048  *   Pointer to the Ethernet device structure.
3049  * @param[in] shared
3050  *   Indicate if this counter is shared with other flows.
3051  * @param[in] id
3052  *   Counter identifier.
3053  * @param[in] group
3054  *   Counter flow group.
3055  *
3056  * @return
3057  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
3058  */
3059 static struct mlx5_flow_counter *
3060 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
3061                       uint16_t group)
3062 {
3063         struct mlx5_priv *priv = dev->data->dev_private;
3064         struct mlx5_flow_counter_pool *pool = NULL;
3065         struct mlx5_flow_counter *cnt_free = NULL;
3066         /*
3067          * Currently a group 0 flow counter cannot be assigned to a flow if it
3068          * is not the first one in the batch counter allocation, so it is
3069          * better to allocate counters one by one for these flows in a
3070          * separate container.
3071          * A counter can be shared between different groups, so shared
3072          * counters need to be taken from the single-counter container.
3073          */
3074         uint32_t batch = (group && !shared) ? 1 : 0;
3075         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
3076                                                                0);
3077
3078         if (priv->counter_fallback)
3079                 return flow_dv_counter_alloc_fallback(dev, shared, id);
3080         if (!priv->config.devx) {
3081                 rte_errno = ENOTSUP;
3082                 return NULL;
3083         }
3084         if (shared) {
3085                 cnt_free = flow_dv_counter_shared_search(cont, id);
3086                 if (cnt_free) {
3087                         if (cnt_free->ref_cnt + 1 == 0) {
3088                                 rte_errno = E2BIG;
3089                                 return NULL;
3090                         }
3091                         cnt_free->ref_cnt++;
3092                         return cnt_free;
3093                 }
3094         }
3095         /* Pools that have free counters are at the start of the list. */
3096         TAILQ_FOREACH(pool, &cont->pool_list, next) {
3097                 /*
3098                  * The reset values of a freed counter must be updated between
3099                  * its release and its next allocation, so at least one query
3100                  * must happen in between. Ensure it by saving the query
3101                  * generation at release time.
3102                  * The free list is sorted by generation, so if the first
3103                  * counter is not updated, none of the following ones are
3104                  * either.
3105                  */
3106                 cnt_free = TAILQ_FIRST(&pool->counters);
3107                 if (cnt_free && cnt_free->query_gen + 1 <
3108                     rte_atomic64_read(&pool->query_gen))
3109                         break;
3110                 cnt_free = NULL;
3111         }
3112         if (!cnt_free) {
3113                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
3114                 if (!pool)
3115                         return NULL;
3116         }
3117         cnt_free->batch = batch;
3118         /* Create a DV counter action only on first use. */
3119         if (!cnt_free->action) {
3120                 uint16_t offset;
3121                 struct mlx5_devx_obj *dcs;
3122
3123                 if (batch) {
3124                         offset = cnt_free - &pool->counters_raw[0];
3125                         dcs = pool->min_dcs;
3126                 } else {
3127                         offset = 0;
3128                         dcs = cnt_free->dcs;
3129                 }
3130                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
3131                                         (dcs->obj, offset);
3132                 if (!cnt_free->action) {
3133                         rte_errno = errno;
3134                         return NULL;
3135                 }
3136         }
3137         /* Update the counter reset values. */
3138         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
3139                                  &cnt_free->bytes))
3140                 return NULL;
3141         cnt_free->shared = shared;
3142         cnt_free->ref_cnt = 1;
3143         cnt_free->id = id;
3144         if (!priv->sh->cmng.query_thread_on)
3145                 /* Start the asynchronous batch query by the host thread. */
3146                 mlx5_set_query_alarm(priv->sh);
3147         TAILQ_REMOVE(&pool->counters, cnt_free, next);
3148         if (TAILQ_EMPTY(&pool->counters)) {
3149                 /* Move the pool to the end of the container pool list. */
3150                 TAILQ_REMOVE(&cont->pool_list, pool, next);
3151                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
3152         }
3153         return cnt_free;
3154 }
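/*
 * Usage sketch (illustrative only; the dev_flow wiring is an
 * assumption based on how the translate stage consumes counters, not a
 * verbatim call sequence):
 *
 *	struct mlx5_flow_counter *cnt;
 *
 *	// shared = 1, id = 42, group = 1
 *	cnt = flow_dv_counter_alloc(dev, 1, 42, 1);
 *	if (!cnt)
 *		return -rte_errno;
 *	dev_flow->dv.actions[actions_n++] = cnt->action;
 *	// ... create the flow, query it, destroy it ...
 *	flow_dv_counter_release(dev, cnt);
 */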
3155
3156 /**
3157  * Release a flow counter.
3158  *
3159  * @param[in] dev
3160  *   Pointer to the Ethernet device structure.
3161  * @param[in] counter
3162  *   Pointer to the counter handler.
3163  */
3164 static void
3165 flow_dv_counter_release(struct rte_eth_dev *dev,
3166                         struct mlx5_flow_counter *counter)
3167 {
3168         struct mlx5_priv *priv = dev->data->dev_private;
3169
3170         if (!counter)
3171                 return;
3172         if (priv->counter_fallback) {
3173                 flow_dv_counter_release_fallback(dev, counter);
3174                 return;
3175         }
3176         if (--counter->ref_cnt == 0) {
3177                 struct mlx5_flow_counter_pool *pool =
3178                                 flow_dv_counter_pool_get(counter);
3179
3180                 /* Put the counter at the end - it has the newest generation. */
3181                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
3182                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
3183         }
3184 }
3185
3186 /**
3187  * Verify the @p attributes will be correctly understood by the NIC and store
3188  * them in the @p flow if everything is correct.
3189  *
3190  * @param[in] dev
3191  *   Pointer to dev struct.
3192  * @param[in] attributes
3193  *   Pointer to flow attributes.
3194  * @param[in] external
3195  *   True if this flow rule is created by a request external to the PMD.
3196  * @param[out] error
3197  *   Pointer to error structure.
3198  *
3199  * @return
3200  *   0 on success, a negative errno value otherwise and rte_errno is set.
3201  */
3202 static int
3203 flow_dv_validate_attributes(struct rte_eth_dev *dev,
3204                             const struct rte_flow_attr *attributes,
3205                             bool external __rte_unused,
3206                             struct rte_flow_error *error)
3207 {
3208         struct mlx5_priv *priv = dev->data->dev_private;
3209         uint32_t priority_max = priv->config.flow_prio - 1;
3210
3211 #ifndef HAVE_MLX5DV_DR
3212         if (attributes->group)
3213                 return rte_flow_error_set(error, ENOTSUP,
3214                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
3215                                           NULL,
3216                                           "groups are not supported");
3217 #else
3218         uint32_t max_group = attributes->transfer ? MLX5_MAX_TABLES_FDB :
3219                                                     MLX5_MAX_TABLES;
3220         uint32_t table;
3221         int ret;
3222
3223         ret = mlx5_flow_group_to_table(attributes, external,
3224                                        attributes->group,
3225                                        &table, error);
3226         if (ret)
3227                 return ret;
3228         if (table >= max_group)
3229                 return rte_flow_error_set(error, EINVAL,
3230                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP, NULL,
3231                                           "group index out of range");
3232 #endif
3233         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
3234             attributes->priority >= priority_max)
3235                 return rte_flow_error_set(error, ENOTSUP,
3236                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
3237                                           NULL,
3238                                           "priority out of range");
3239         if (attributes->transfer) {
3240                 if (!priv->config.dv_esw_en)
3241                         return rte_flow_error_set
3242                                 (error, ENOTSUP,
3243                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3244                                  "E-Switch dr is not supported");
3245                 if (!(priv->representor || priv->master))
3246                         return rte_flow_error_set
3247                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3248                                  NULL, "E-Switch configuration can only be"
3249                                  " done by a master or a representor device");
3250                 if (attributes->egress)
3251                         return rte_flow_error_set
3252                                 (error, ENOTSUP,
3253                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
3254                                  "egress is not supported");
3255         }
3256         if (!(attributes->egress ^ attributes->ingress))
3257                 return rte_flow_error_set(error, ENOTSUP,
3258                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
3259                                           "must specify exactly one of "
3260                                           "ingress or egress");
3261         return 0;
3262 }
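/*
 * Example of attributes that pass the checks above (illustrative):
 *
 *	struct rte_flow_attr attr = {
 *		.group = 1,	// needs HAVE_MLX5DV_DR, otherwise 0
 *		.priority = 0,
 *		.ingress = 1,	// exactly one of ingress/egress
 *	};
 *
 * With .transfer = 1 the checks additionally require dv_esw_en, a
 * master or representor port, and .egress left clear.
 */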
3263
3264 /**
3265  * Internal validation function. For validating both actions and items.
3266  *
3267  * @param[in] dev
3268  *   Pointer to the rte_eth_dev structure.
3269  * @param[in] attr
3270  *   Pointer to the flow attributes.
3271  * @param[in] items
3272  *   Pointer to the list of items.
3273  * @param[in] actions
3274  *   Pointer to the list of actions.
3275  * @param[in] external
3276  *   True if this flow rule is created by a request external to the PMD.
3277  * @param[out] error
3278  *   Pointer to the error structure.
3279  *
3280  * @return
3281  *   0 on success, a negative errno value otherwise and rte_errno is set.
3282  */
3283 static int
3284 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
3285                  const struct rte_flow_item items[],
3286                  const struct rte_flow_action actions[],
3287                  bool external, struct rte_flow_error *error)
3288 {
3289         int ret;
3290         uint64_t action_flags = 0;
3291         uint64_t item_flags = 0;
3292         uint64_t last_item = 0;
3293         uint8_t next_protocol = 0xff;
3294         int actions_n = 0;
3295         const struct rte_flow_item *gre_item = NULL;
3296         struct rte_flow_item_tcp nic_tcp_mask = {
3297                 .hdr = {
3298                         .tcp_flags = 0xFF,
3299                         .src_port = RTE_BE16(UINT16_MAX),
3300                         .dst_port = RTE_BE16(UINT16_MAX),
3301                 }
3302         };
3303
3304         if (items == NULL)
3305                 return -1;
3306         ret = flow_dv_validate_attributes(dev, attr, external, error);
3307         if (ret < 0)
3308                 return ret;
3309         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3310                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3311                 switch (items->type) {
3312                 case RTE_FLOW_ITEM_TYPE_VOID:
3313                         break;
3314                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3315                         ret = flow_dv_validate_item_port_id
3316                                         (dev, items, attr, item_flags, error);
3317                         if (ret < 0)
3318                                 return ret;
3319                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3320                         break;
3321                 case RTE_FLOW_ITEM_TYPE_ETH:
3322                         ret = mlx5_flow_validate_item_eth(items, item_flags,
3323                                                           error);
3324                         if (ret < 0)
3325                                 return ret;
3326                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3327                                              MLX5_FLOW_LAYER_OUTER_L2;
3328                         break;
3329                 case RTE_FLOW_ITEM_TYPE_VLAN:
3330                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
3331                                                            dev, error);
3332                         if (ret < 0)
3333                                 return ret;
3334                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
3335                                              MLX5_FLOW_LAYER_OUTER_VLAN;
3336                         break;
3337                 case RTE_FLOW_ITEM_TYPE_IPV4:
3338                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3339                                                   &item_flags, &tunnel);
3340                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
3341                                                            NULL, error);
3342                         if (ret < 0)
3343                                 return ret;
3344                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3345                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3346                         if (items->mask != NULL &&
3347                             ((const struct rte_flow_item_ipv4 *)
3348                              items->mask)->hdr.next_proto_id) {
3349                                 next_protocol =
3350                                         ((const struct rte_flow_item_ipv4 *)
3351                                          (items->spec))->hdr.next_proto_id;
3352                                 next_protocol &=
3353                                         ((const struct rte_flow_item_ipv4 *)
3354                                          (items->mask))->hdr.next_proto_id;
3355                         } else {
3356                                 /* Reset for inner layer. */
3357                                 next_protocol = 0xff;
3358                         }
3359                         break;
3360                 case RTE_FLOW_ITEM_TYPE_IPV6:
3361                         mlx5_flow_tunnel_ip_check(items, next_protocol,
3362                                                   &item_flags, &tunnel);
3363                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
3364                                                            NULL, error);
3365                         if (ret < 0)
3366                                 return ret;
3367                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3368                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3369                         if (items->mask != NULL &&
3370                             ((const struct rte_flow_item_ipv6 *)
3371                              items->mask)->hdr.proto) {
3372                                 next_protocol =
3373                                         ((const struct rte_flow_item_ipv6 *)
3374                                          items->spec)->hdr.proto;
3375                                 next_protocol &=
3376                                         ((const struct rte_flow_item_ipv6 *)
3377                                          items->mask)->hdr.proto;
3378                         } else {
3379                                 /* Reset for inner layer. */
3380                                 next_protocol = 0xff;
3381                         }
3382                         break;
3383                 case RTE_FLOW_ITEM_TYPE_TCP:
3384                         ret = mlx5_flow_validate_item_tcp
3385                                                 (items, item_flags,
3386                                                  next_protocol,
3387                                                  &nic_tcp_mask,
3388                                                  error);
3389                         if (ret < 0)
3390                                 return ret;
3391                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3392                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3393                         break;
3394                 case RTE_FLOW_ITEM_TYPE_UDP:
3395                         ret = mlx5_flow_validate_item_udp(items, item_flags,
3396                                                           next_protocol,
3397                                                           error);
3398                         if (ret < 0)
3399                                 return ret;
3400                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3401                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3402                         break;
3403                 case RTE_FLOW_ITEM_TYPE_GRE:
3404                         ret = mlx5_flow_validate_item_gre(items, item_flags,
3405                                                           next_protocol, error);
3406                         if (ret < 0)
3407                                 return ret;
3408                         gre_item = items;
3409                         last_item = MLX5_FLOW_LAYER_GRE;
3410                         break;
3411                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3412                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
3413                                                             next_protocol,
3414                                                             error);
3415                         if (ret < 0)
3416                                 return ret;
3417                         last_item = MLX5_FLOW_LAYER_NVGRE;
3418                         break;
3419                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3420                         ret = mlx5_flow_validate_item_gre_key
3421                                 (items, item_flags, gre_item, error);
3422                         if (ret < 0)
3423                                 return ret;
3424                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
3425                         break;
3426                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3427                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
3428                                                             error);
3429                         if (ret < 0)
3430                                 return ret;
3431                         last_item = MLX5_FLOW_LAYER_VXLAN;
3432                         break;
3433                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3434                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
3435                                                                 item_flags, dev,
3436                                                                 error);
3437                         if (ret < 0)
3438                                 return ret;
3439                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3440                         break;
3441                 case RTE_FLOW_ITEM_TYPE_MPLS:
3442                         ret = mlx5_flow_validate_item_mpls(dev, items,
3443                                                            item_flags,
3444                                                            last_item, error);
3445                         if (ret < 0)
3446                                 return ret;
3447                         last_item = MLX5_FLOW_LAYER_MPLS;
3448                         break;
3449                 case RTE_FLOW_ITEM_TYPE_META:
3450                         ret = flow_dv_validate_item_meta(dev, items, attr,
3451                                                          error);
3452                         if (ret < 0)
3453                                 return ret;
3454                         last_item = MLX5_FLOW_ITEM_METADATA;
3455                         break;
3456                 case RTE_FLOW_ITEM_TYPE_ICMP:
3457                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3458                                                            next_protocol,
3459                                                            error);
3460                         if (ret < 0)
3461                                 return ret;
3462                         last_item = MLX5_FLOW_LAYER_ICMP;
3463                         break;
3464                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3465                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3466                                                             next_protocol,
3467                                                             error);
3468                         if (ret < 0)
3469                                 return ret;
3470                         last_item = MLX5_FLOW_LAYER_ICMP6;
3471                         break;
3472                 default:
3473                         return rte_flow_error_set(error, ENOTSUP,
3474                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3475                                                   NULL, "item not supported");
3476                 }
3477                 item_flags |= last_item;
3478         }
3479         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3480                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3481                         return rte_flow_error_set(error, ENOTSUP,
3482                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3483                                                   actions, "too many actions");
3484                 switch (actions->type) {
3485                 case RTE_FLOW_ACTION_TYPE_VOID:
3486                         break;
3487                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3488                         ret = flow_dv_validate_action_port_id(dev,
3489                                                               action_flags,
3490                                                               actions,
3491                                                               attr,
3492                                                               error);
3493                         if (ret)
3494                                 return ret;
3495                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3496                         ++actions_n;
3497                         break;
3498                 case RTE_FLOW_ACTION_TYPE_FLAG:
3499                         ret = mlx5_flow_validate_action_flag(action_flags,
3500                                                              attr, error);
3501                         if (ret < 0)
3502                                 return ret;
3503                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3504                         ++actions_n;
3505                         break;
3506                 case RTE_FLOW_ACTION_TYPE_MARK:
3507                         ret = mlx5_flow_validate_action_mark(actions,
3508                                                              action_flags,
3509                                                              attr, error);
3510                         if (ret < 0)
3511                                 return ret;
3512                         action_flags |= MLX5_FLOW_ACTION_MARK;
3513                         ++actions_n;
3514                         break;
3515                 case RTE_FLOW_ACTION_TYPE_DROP:
3516                         ret = mlx5_flow_validate_action_drop(action_flags,
3517                                                              attr, error);
3518                         if (ret < 0)
3519                                 return ret;
3520                         action_flags |= MLX5_FLOW_ACTION_DROP;
3521                         ++actions_n;
3522                         break;
3523                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3524                         ret = mlx5_flow_validate_action_queue(actions,
3525                                                               action_flags, dev,
3526                                                               attr, error);
3527                         if (ret < 0)
3528                                 return ret;
3529                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3530                         ++actions_n;
3531                         break;
3532                 case RTE_FLOW_ACTION_TYPE_RSS:
3533                         ret = mlx5_flow_validate_action_rss(actions,
3534                                                             action_flags, dev,
3535                                                             attr, item_flags,
3536                                                             error);
3537                         if (ret < 0)
3538                                 return ret;
3539                         action_flags |= MLX5_FLOW_ACTION_RSS;
3540                         ++actions_n;
3541                         break;
3542                 case RTE_FLOW_ACTION_TYPE_COUNT:
3543                         ret = flow_dv_validate_action_count(dev, error);
3544                         if (ret < 0)
3545                                 return ret;
3546                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3547                         ++actions_n;
3548                         break;
3549                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
3550                         if (flow_dv_validate_action_pop_vlan(dev,
3551                                                              action_flags,
3552                                                              actions,
3553                                                              item_flags, attr,
3554                                                              error))
3555                                 return -rte_errno;
3556                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
3557                         ++actions_n;
3558                         break;
3559                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
3560                         ret = flow_dv_validate_action_push_vlan(action_flags,
3561                                                                 actions, attr,
3562                                                                 error);
3563                         if (ret < 0)
3564                                 return ret;
3565                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
3566                         ++actions_n;
3567                         break;
3568                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
3569                         ret = flow_dv_validate_action_set_vlan_pcp
3570                                                 (action_flags, actions, error);
3571                         if (ret < 0)
3572                                 return ret;
3573                         /* Count PCP together with the push_vlan action. */
3574                         break;
3575                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
3576                         ret = flow_dv_validate_action_set_vlan_vid
3577                                                 (item_flags, actions, error);
3578                         if (ret < 0)
3579                                 return ret;
3580                         /* Count VID together with the push_vlan action. */
3581                         break;
3582                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3583                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3584                         ret = flow_dv_validate_action_l2_encap(action_flags,
3585                                                                actions, attr,
3586                                                                error);
3587                         if (ret < 0)
3588                                 return ret;
3589                         action_flags |= actions->type ==
3590                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3591                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3592                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3593                         ++actions_n;
3594                         break;
3595                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3596                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3597                         ret = flow_dv_validate_action_l2_decap(action_flags,
3598                                                                attr, error);
3599                         if (ret < 0)
3600                                 return ret;
3601                         action_flags |= actions->type ==
3602                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3603                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3604                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3605                         ++actions_n;
3606                         break;
3607                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3608                         ret = flow_dv_validate_action_raw_encap(action_flags,
3609                                                                 actions, attr,
3610                                                                 error);
3611                         if (ret < 0)
3612                                 return ret;
3613                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3614                         ++actions_n;
3615                         break;
3616                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3617                         ret = flow_dv_validate_action_raw_decap(action_flags,
3618                                                                 actions, attr,
3619                                                                 error);
3620                         if (ret < 0)
3621                                 return ret;
3622                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3623                         ++actions_n;
3624                         break;
3625                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3626                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3627                         ret = flow_dv_validate_action_modify_mac(action_flags,
3628                                                                  actions,
3629                                                                  item_flags,
3630                                                                  error);
3631                         if (ret < 0)
3632                                 return ret;
3633                         /* Count all modify-header actions as one action. */
3634                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3635                                 ++actions_n;
3636                         action_flags |= actions->type ==
3637                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3638                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3639                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3640                         break;
3642                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3643                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3644                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3645                                                                   actions,
3646                                                                   item_flags,
3647                                                                   error);
3648                         if (ret < 0)
3649                                 return ret;
3650                         /* Count all modify-header actions as one action. */
3651                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3652                                 ++actions_n;
3653                         action_flags |= actions->type ==
3654                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3655                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3656                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3657                         break;
3658                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3659                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3660                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3661                                                                   actions,
3662                                                                   item_flags,
3663                                                                   error);
3664                         if (ret < 0)
3665                                 return ret;
3666                         /* Count all modify-header actions as one action. */
3667                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3668                                 ++actions_n;
3669                         action_flags |= actions->type ==
3670                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3671                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3672                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3673                         break;
3674                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3675                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3676                         ret = flow_dv_validate_action_modify_tp(action_flags,
3677                                                                 actions,
3678                                                                 item_flags,
3679                                                                 error);
3680                         if (ret < 0)
3681                                 return ret;
3682                         /* Count all modify-header actions as one action. */
3683                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3684                                 ++actions_n;
3685                         action_flags |= actions->type ==
3686                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3687                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3688                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3689                         break;
3690                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3691                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3692                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3693                                                                  actions,
3694                                                                  item_flags,
3695                                                                  error);
3696                         if (ret < 0)
3697                                 return ret;
3698                         /* Count all modify-header actions as one action. */
3699                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3700                                 ++actions_n;
3701                         action_flags |= actions->type ==
3702                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3703                                                 MLX5_FLOW_ACTION_SET_TTL :
3704                                                 MLX5_FLOW_ACTION_DEC_TTL;
3705                         break;
3706                 case RTE_FLOW_ACTION_TYPE_JUMP:
3707                         ret = flow_dv_validate_action_jump(actions,
3708                                                            action_flags,
3709                                                            attr, external,
3710                                                            error);
3711                         if (ret)
3712                                 return ret;
3713                         ++actions_n;
3714                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3715                         break;
3716                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3717                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3718                         ret = flow_dv_validate_action_modify_tcp_seq
3719                                                                 (action_flags,
3720                                                                  actions,
3721                                                                  item_flags,
3722                                                                  error);
3723                         if (ret < 0)
3724                                 return ret;
3725                         /* Count all modify-header actions as one action. */
3726                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3727                                 ++actions_n;
3728                         action_flags |= actions->type ==
3729                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3730                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3731                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3732                         break;
3733                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3734                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3735                         ret = flow_dv_validate_action_modify_tcp_ack
3736                                                                 (action_flags,
3737                                                                  actions,
3738                                                                  item_flags,
3739                                                                  error);
3740                         if (ret < 0)
3741                                 return ret;
3742                         /* Count all modify-header actions as one action. */
3743                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3744                                 ++actions_n;
3745                         action_flags |= actions->type ==
3746                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3747                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3748                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3749                         break;
3750                 default:
3751                         return rte_flow_error_set(error, ENOTSUP,
3752                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3753                                                   actions,
3754                                                   "action not supported");
3755                 }
3756         }
3757         if ((action_flags & MLX5_FLOW_LAYER_TUNNEL) &&
3758             (action_flags & MLX5_FLOW_VLAN_ACTIONS))
3759                 return rte_flow_error_set(error, ENOTSUP,
3760                                           RTE_FLOW_ERROR_TYPE_ACTION,
3761                                           actions,
3762                                           "can't have vxlan and vlan"
3763                                           " actions in the same rule");
3764         /* Eswitch has a few restrictions on using items and actions. */
3765         if (attr->transfer) {
3766                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3767                         return rte_flow_error_set(error, ENOTSUP,
3768                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3769                                                   NULL,
3770                                                   "unsupported action FLAG");
3771                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3772                         return rte_flow_error_set(error, ENOTSUP,
3773                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3774                                                   NULL,
3775                                                   "unsupported action MARK");
3776                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3777                         return rte_flow_error_set(error, ENOTSUP,
3778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3779                                                   NULL,
3780                                                   "unsupported action QUEUE");
3781                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3782                         return rte_flow_error_set(error, ENOTSUP,
3783                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3784                                                   NULL,
3785                                                   "unsupported action RSS");
3786                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3787                         return rte_flow_error_set(error, EINVAL,
3788                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3789                                                   actions,
3790                                                   "no fate action is found");
3791         } else {
3792                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3793                         return rte_flow_error_set(error, EINVAL,
3794                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3795                                                   actions,
3796                                                   "no fate action is found");
3797         }
3798         return 0;
3799 }
3800
3801 /**
3802  * Internal preparation function. Allocates the DV flow,
3803  * whose size is constant.
3804  *
3805  * @param[in] attr
3806  *   Pointer to the flow attributes.
3807  * @param[in] items
3808  *   Pointer to the list of items.
3809  * @param[in] actions
3810  *   Pointer to the list of actions.
3811  * @param[out] error
3812  *   Pointer to the error structure.
3813  *
3814  * @return
3815  *   Pointer to mlx5_flow object on success,
3816  *   otherwise NULL and rte_errno is set.
3817  */
3818 static struct mlx5_flow *
3819 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3820                 const struct rte_flow_item items[] __rte_unused,
3821                 const struct rte_flow_action actions[] __rte_unused,
3822                 struct rte_flow_error *error)
3823 {
3824         uint32_t size = sizeof(struct mlx5_flow);
3825         struct mlx5_flow *flow;
3826
3827         flow = rte_calloc(__func__, 1, size, 0);
3828         if (!flow) {
3829                 rte_flow_error_set(error, ENOMEM,
3830                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3831                                    "not enough memory to create flow");
3832                 return NULL;
3833         }
3834         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3835         return flow;
3836 }
3837
3838 #ifndef NDEBUG
3839 /**
3840  * Sanity check for match mask and value. Similar to check_valid_spec() in
3841  * the kernel driver. If an unmasked bit is set in the value, the check fails.
3842  *
3843  * @param match_mask
3844  *   pointer to match mask buffer.
3845  * @param match_value
3846  *   pointer to match value buffer.
3847  *
3848  * @return
3849  *   0 if valid, -EINVAL otherwise.
3850  */
3851 static int
3852 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3853 {
3854         uint8_t *m = match_mask;
3855         uint8_t *v = match_value;
3856         unsigned int i;
3857
3858         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3859                 if (v[i] & ~m[i]) {
3860                         DRV_LOG(ERR,
3861                                 "match_value differs from match_criteria"
3862                                 " %p[%u] != %p[%u]",
3863                                 match_value, i, match_mask, i);
3864                         return -EINVAL;
3865                 }
3866         }
3867         return 0;
3868 }
3869 #endif
3870
3871 /**
3872  * Add Ethernet item to matcher and to the value.
3873  *
3874  * @param[in, out] matcher
3875  *   Flow matcher.
3876  * @param[in, out] key
3877  *   Flow matcher value.
3878  * @param[in] item
3879  *   Flow pattern to translate.
3880  * @param[in] inner
3881  *   Item is inner pattern.
3882  */
3883 static void
3884 flow_dv_translate_item_eth(void *matcher, void *key,
3885                            const struct rte_flow_item *item, int inner)
3886 {
3887         const struct rte_flow_item_eth *eth_m = item->mask;
3888         const struct rte_flow_item_eth *eth_v = item->spec;
3889         const struct rte_flow_item_eth nic_mask = {
3890                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3891                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3892                 .type = RTE_BE16(0xffff),
3893         };
3894         void *headers_m;
3895         void *headers_v;
3896         char *l24_v;
3897         unsigned int i;
3898
3899         if (!eth_v)
3900                 return;
3901         if (!eth_m)
3902                 eth_m = &nic_mask;
3903         if (inner) {
3904                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3905                                          inner_headers);
3906                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3907         } else {
3908                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3909                                          outer_headers);
3910                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3911         }
3912         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
3913                &eth_m->dst, sizeof(eth_m->dst));
3914         /* The value must be in the range of the mask. */
3915         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
3916         for (i = 0; i < sizeof(eth_m->dst); ++i)
3917                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
3918         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
3919                &eth_m->src, sizeof(eth_m->src));
3920         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
3921         /* The value must be in the range of the mask. */
3922         for (i = 0; i < sizeof(eth_m->src); ++i)
3923                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
3924         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3925                  rte_be_to_cpu_16(eth_m->type));
3926         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
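        /* Spec and mask are big-endian; store the masked value as-is. */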
3927         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
3928 }
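
/*
 * Illustration only, not part of the driver: a caller translating an outer
 * Ethernet item is expected to pass caller-owned, zeroed fte_match_param
 * buffers (matcher_buf and key_buf below are assumed names):
 *
 *	struct rte_flow_item_eth spec = {
 *		.type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	struct rte_flow_item_eth mask = { .type = RTE_BE16(0xffff) };
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_ETH,
 *		.spec = &spec,
 *		.mask = &mask,
 *	};
 *
 *	flow_dv_translate_item_eth(matcher_buf, key_buf, &item, 0);
 */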
3929
3930 /**
3931  * Add VLAN item to matcher and to the value.
3932  *
3933  * @param[in, out] dev_flow
3934  *   Flow descriptor.
3935  * @param[in, out] matcher
3936  *   Flow matcher.
3937  * @param[in, out] key
3938  *   Flow matcher value.
3939  * @param[in] item
3940  *   Flow pattern to translate.
3941  * @param[in] inner
3942  *   Item is inner pattern.
3943  */
3944 static void
3945 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
3946                             void *matcher, void *key,
3947                             const struct rte_flow_item *item,
3948                             int inner)
3949 {
3950         const struct rte_flow_item_vlan *vlan_m = item->mask;
3951         const struct rte_flow_item_vlan *vlan_v = item->spec;
3952         void *headers_m;
3953         void *headers_v;
3954         uint16_t tci_m;
3955         uint16_t tci_v;
3956
3957         if (!vlan_v)
3958                 return;
3959         if (!vlan_m)
3960                 vlan_m = &rte_flow_item_vlan_mask;
3961         if (inner) {
3962                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3963                                          inner_headers);
3964                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3965         } else {
3966                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3967                                          outer_headers);
3968                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3969                 /*
3970                  * This is a workaround: masks are not supported here,
3971                  * and the TCI has been pre-validated.
3972                  */
3973                 dev_flow->dv.vf_vlan.tag =
3974                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
3975         }
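        /* TCI layout: PCP in bits 15-13, CFI/DEI in bit 12, VID in bits 11-0. */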
3976         tci_m = rte_be_to_cpu_16(vlan_m->tci);
3977         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
3978         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
3979         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
3980         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
3981         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
3982         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
3983         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
3984         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
3985         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
3986         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3987                  rte_be_to_cpu_16(vlan_m->inner_type));
3988         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
3989                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
3990 }
3991
3992 /**
3993  * Add IPV4 item to matcher and to the value.
3994  *
3995  * @param[in, out] matcher
3996  *   Flow matcher.
3997  * @param[in, out] key
3998  *   Flow matcher value.
3999  * @param[in] item
4000  *   Flow pattern to translate.
4001  * @param[in] inner
4002  *   Item is inner pattern.
4003  * @param[in] group
4004  *   The group to insert the rule.
4005  */
4006 static void
4007 flow_dv_translate_item_ipv4(void *matcher, void *key,
4008                             const struct rte_flow_item *item,
4009                             int inner, uint32_t group)
4010 {
4011         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
4012         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
4013         const struct rte_flow_item_ipv4 nic_mask = {
4014                 .hdr = {
4015                         .src_addr = RTE_BE32(0xffffffff),
4016                         .dst_addr = RTE_BE32(0xffffffff),
4017                         .type_of_service = 0xff,
4018                         .next_proto_id = 0xff,
4019                 },
4020         };
4021         void *headers_m;
4022         void *headers_v;
4023         char *l24_m;
4024         char *l24_v;
4025         uint8_t tos;
4026
4027         if (inner) {
4028                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4029                                          inner_headers);
4030                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4031         } else {
4032                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4033                                          outer_headers);
4034                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4035         }
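        /*
         * Match the full 4-bit ip_version on the root table (group 0),
         * only the IPv4 bit otherwise.
         */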
4036         if (group == 0)
4037                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4038         else
4039                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
4040         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
4041         if (!ipv4_v)
4042                 return;
4043         if (!ipv4_m)
4044                 ipv4_m = &nic_mask;
4045         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4046                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4047         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4048                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
4049         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
4050         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
4051         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4052                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4053         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4054                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
4055         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
4056         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
4057         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
4058         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
4059                  ipv4_m->hdr.type_of_service);
4060         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
4061         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
4062                  ipv4_m->hdr.type_of_service >> 2);
4063         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
4064         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4065                  ipv4_m->hdr.next_proto_id);
4066         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4067                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
4068 }
4069
4070 /**
4071  * Add IPV6 item to matcher and to the value.
4072  *
4073  * @param[in, out] matcher
4074  *   Flow matcher.
4075  * @param[in, out] key
4076  *   Flow matcher value.
4077  * @param[in] item
4078  *   Flow pattern to translate.
4079  * @param[in] inner
4080  *   Item is inner pattern.
4081  * @param[in] group
4082  *   The group to insert the rule.
4083  */
4084 static void
4085 flow_dv_translate_item_ipv6(void *matcher, void *key,
4086                             const struct rte_flow_item *item,
4087                             int inner, uint32_t group)
4088 {
4089         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
4090         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
4091         const struct rte_flow_item_ipv6 nic_mask = {
4092                 .hdr = {
4093                         .src_addr =
4094                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4095                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4096                         .dst_addr =
4097                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
4098                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
4099                         .vtc_flow = RTE_BE32(0xffffffff),
4100                         .proto = 0xff,
4101                         .hop_limits = 0xff,
4102                 },
4103         };
4104         void *headers_m;
4105         void *headers_v;
4106         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4107         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4108         char *l24_m;
4109         char *l24_v;
4110         uint32_t vtc_m;
4111         uint32_t vtc_v;
4112         int i;
4113         int size;
4114
4115         if (inner) {
4116                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4117                                          inner_headers);
4118                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4119         } else {
4120                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4121                                          outer_headers);
4122                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4123         }
4124         if (group == 0)
4125                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
4126         else
4127                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
4128         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
4129         if (!ipv6_v)
4130                 return;
4131         if (!ipv6_m)
4132                 ipv6_m = &nic_mask;
4133         size = sizeof(ipv6_m->hdr.dst_addr);
4134         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4135                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4136         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4137                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
4138         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
4139         for (i = 0; i < size; ++i)
4140                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
4141         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
4142                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4143         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
4144                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
4145         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
4146         for (i = 0; i < size; ++i)
4147                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
4148         /* TOS. */
4149         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
4150         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
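        /* Traffic class is vtc_flow bits 27-20: ECN in 21-20, DSCP in 27-22. */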
4151         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
4152         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
4153         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
4154         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
4155         /* Label. */
4156         if (inner) {
4157                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
4158                          vtc_m);
4159                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
4160                          vtc_v);
4161         } else {
4162                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
4163                          vtc_m);
4164                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
4165                          vtc_v);
4166         }
4167         /* Protocol. */
4168         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
4169                  ipv6_m->hdr.proto);
4170         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4171                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
4172 }
4173
4174 /**
4175  * Add TCP item to matcher and to the value.
4176  *
4177  * @param[in, out] matcher
4178  *   Flow matcher.
4179  * @param[in, out] key
4180  *   Flow matcher value.
4181  * @param[in] item
4182  *   Flow pattern to translate.
4183  * @param[in] inner
4184  *   Item is inner pattern.
4185  */
4186 static void
4187 flow_dv_translate_item_tcp(void *matcher, void *key,
4188                            const struct rte_flow_item *item,
4189                            int inner)
4190 {
4191         const struct rte_flow_item_tcp *tcp_m = item->mask;
4192         const struct rte_flow_item_tcp *tcp_v = item->spec;
4193         void *headers_m;
4194         void *headers_v;
4195
4196         if (inner) {
4197                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4198                                          inner_headers);
4199                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4200         } else {
4201                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4202                                          outer_headers);
4203                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4204         }
4205         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4206         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
4207         if (!tcp_v)
4208                 return;
4209         if (!tcp_m)
4210                 tcp_m = &rte_flow_item_tcp_mask;
4211         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
4212                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
4213         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
4214                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
4215         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
4216                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
4217         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
4218                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
4219         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
4220                  tcp_m->hdr.tcp_flags);
4221         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
4222                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
4223 }
4224
4225 /**
4226  * Add UDP item to matcher and to the value.
4227  *
4228  * @param[in, out] matcher
4229  *   Flow matcher.
4230  * @param[in, out] key
4231  *   Flow matcher value.
4232  * @param[in] item
4233  *   Flow pattern to translate.
4234  * @param[in] inner
4235  *   Item is inner pattern.
4236  */
4237 static void
4238 flow_dv_translate_item_udp(void *matcher, void *key,
4239                            const struct rte_flow_item *item,
4240                            int inner)
4241 {
4242         const struct rte_flow_item_udp *udp_m = item->mask;
4243         const struct rte_flow_item_udp *udp_v = item->spec;
4244         void *headers_m;
4245         void *headers_v;
4246
4247         if (inner) {
4248                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4249                                          inner_headers);
4250                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4251         } else {
4252                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4253                                          outer_headers);
4254                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4255         }
4256         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4257         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
4258         if (!udp_v)
4259                 return;
4260         if (!udp_m)
4261                 udp_m = &rte_flow_item_udp_mask;
4262         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
4263                  rte_be_to_cpu_16(udp_m->hdr.src_port));
4264         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
4265                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
4266         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
4267                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
4268         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4269                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
4270 }
4271
4272 /**
4273  * Add GRE optional Key item to matcher and to the value.
4274  *
4275  * @param[in, out] matcher
4276  *   Flow matcher.
4277  * @param[in, out] key
4278  *   Flow matcher value.
4279  * @param[in] item
4280  *   Flow pattern to translate.
4283  */
4284 static void
4285 flow_dv_translate_item_gre_key(void *matcher, void *key,
4286                                const struct rte_flow_item *item)
4287 {
4288         const rte_be32_t *key_m = item->mask;
4289         const rte_be32_t *key_v = item->spec;
4290         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4291         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4292         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
4293
4294         if (!key_v)
4295                 return;
4296         if (!key_m)
4297                 key_m = &gre_key_default_mask;
4298         /* GRE K bit must be on and should already be validated */
4299         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
4300         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
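        /* The key splits into gre_key_h (high 24 bits) and gre_key_l (low 8). */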
4301         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
4302                  rte_be_to_cpu_32(*key_m) >> 8);
4303         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
4304                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
4305         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
4306                  rte_be_to_cpu_32(*key_m) & 0xFF);
4307         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
4308                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
4309 }
4310
4311 /**
4312  * Add GRE item to matcher and to the value.
4313  *
4314  * @param[in, out] matcher
4315  *   Flow matcher.
4316  * @param[in, out] key
4317  *   Flow matcher value.
4318  * @param[in] item
4319  *   Flow pattern to translate.
4320  * @param[in] inner
4321  *   Item is inner pattern.
4322  */
4323 static void
4324 flow_dv_translate_item_gre(void *matcher, void *key,
4325                            const struct rte_flow_item *item,
4326                            int inner)
4327 {
4328         const struct rte_flow_item_gre *gre_m = item->mask;
4329         const struct rte_flow_item_gre *gre_v = item->spec;
4330         void *headers_m;
4331         void *headers_v;
4332         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4333         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4334         struct {
4335                 union {
4336                         __extension__
4337                         struct {
4338                                 uint16_t version:3;
4339                                 uint16_t rsvd0:9;
4340                                 uint16_t s_present:1;
4341                                 uint16_t k_present:1;
4342                                 uint16_t rsvd_bit1:1;
4343                                 uint16_t c_present:1;
4344                         };
4345                         uint16_t value;
4346                 };
4347         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
4348
4349         if (inner) {
4350                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4351                                          inner_headers);
4352                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4353         } else {
4354                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4355                                          outer_headers);
4356                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4357         }
4358         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4359         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
4360         if (!gre_v)
4361                 return;
4362         if (!gre_m)
4363                 gre_m = &rte_flow_item_gre_mask;
4364         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
4365                  rte_be_to_cpu_16(gre_m->protocol));
4366         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4367                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
4368         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
4369         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
4370         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
4371                  gre_crks_rsvd0_ver_m.c_present);
4372         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
4373                  gre_crks_rsvd0_ver_v.c_present &
4374                  gre_crks_rsvd0_ver_m.c_present);
4375         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
4376                  gre_crks_rsvd0_ver_m.k_present);
4377         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
4378                  gre_crks_rsvd0_ver_v.k_present &
4379                  gre_crks_rsvd0_ver_m.k_present);
4380         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
4381                  gre_crks_rsvd0_ver_m.s_present);
4382         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
4383                  gre_crks_rsvd0_ver_v.s_present &
4384                  gre_crks_rsvd0_ver_m.s_present);
4385 }
4386
4387 /**
4388  * Add NVGRE item to matcher and to the value.
4389  *
4390  * @param[in, out] matcher
4391  *   Flow matcher.
4392  * @param[in, out] key
4393  *   Flow matcher value.
4394  * @param[in] item
4395  *   Flow pattern to translate.
4396  * @param[in] inner
4397  *   Item is inner pattern.
4398  */
4399 static void
4400 flow_dv_translate_item_nvgre(void *matcher, void *key,
4401                              const struct rte_flow_item *item,
4402                              int inner)
4403 {
4404         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
4405         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
4406         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4407         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4408         const char *tni_flow_id_m;
4409         const char *tni_flow_id_v;
4410         char *gre_key_m;
4411         char *gre_key_v;
4412         int size;
4413         int i;
4414
4415         /* For NVGRE, GRE header fields must be set with defined values. */
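        /* c_rsvd0_ver 0x2000 sets the K bit; the 0xB000 mask covers C, K and S. */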
4416         const struct rte_flow_item_gre gre_spec = {
4417                 .c_rsvd0_ver = RTE_BE16(0x2000),
4418                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
4419         };
4420         const struct rte_flow_item_gre gre_mask = {
4421                 .c_rsvd0_ver = RTE_BE16(0xB000),
4422                 .protocol = RTE_BE16(UINT16_MAX),
4423         };
4424         const struct rte_flow_item gre_item = {
4425                 .spec = &gre_spec,
4426                 .mask = &gre_mask,
4427                 .last = NULL,
4428         };
4429         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
4430         if (!nvgre_v)
4431                 return;
4432         if (!nvgre_m)
4433                 nvgre_m = &rte_flow_item_nvgre_mask;
        /* nvgre_m may have been defaulted above, take the pointers only now. */
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
4434         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
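        /* The 24-bit TNI and 8-bit flow ID together form the 32-bit GRE key. */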
4435         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
4436         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
4437         memcpy(gre_key_m, tni_flow_id_m, size);
4438         for (i = 0; i < size; ++i)
4439                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
4440 }
4441
4442 /**
4443  * Add VXLAN item to matcher and to the value.
4444  *
4445  * @param[in, out] matcher
4446  *   Flow matcher.
4447  * @param[in, out] key
4448  *   Flow matcher value.
4449  * @param[in] item
4450  *   Flow pattern to translate.
4451  * @param[in] inner
4452  *   Item is inner pattern.
4453  */
4454 static void
4455 flow_dv_translate_item_vxlan(void *matcher, void *key,
4456                              const struct rte_flow_item *item,
4457                              int inner)
4458 {
4459         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
4460         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
4461         void *headers_m;
4462         void *headers_v;
4463         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4464         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4465         char *vni_m;
4466         char *vni_v;
4467         uint16_t dport;
4468         int size;
4469         int i;
4470
4471         if (inner) {
4472                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4473                                          inner_headers);
4474                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4475         } else {
4476                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4477                                          outer_headers);
4478                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4479         }
4480         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4481                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
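        /* Default the UDP destination port when the UDP item left it unset. */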
4482         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4483                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4484                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4485         }
4486         if (!vxlan_v)
4487                 return;
4488         if (!vxlan_m)
4489                 vxlan_m = &rte_flow_item_vxlan_mask;
4490         size = sizeof(vxlan_m->vni);
4491         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4492         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4493         memcpy(vni_m, vxlan_m->vni, size);
4494         for (i = 0; i < size; ++i)
4495                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4496 }
4497
4498 /**
4499  * Add MPLS item to matcher and to the value.
4500  *
4501  * @param[in, out] matcher
4502  *   Flow matcher.
4503  * @param[in, out] key
4504  *   Flow matcher value.
4505  * @param[in] item
4506  *   Flow pattern to translate.
4507  * @param[in] prev_layer
4508  *   The protocol layer indicated in previous item.
4509  * @param[in] inner
4510  *   Item is inner pattern.
4511  */
4512 static void
4513 flow_dv_translate_item_mpls(void *matcher, void *key,
4514                             const struct rte_flow_item *item,
4515                             uint64_t prev_layer,
4516                             int inner)
4517 {
4518         const uint32_t *in_mpls_m = item->mask;
4519         const uint32_t *in_mpls_v = item->spec;
4520         uint32_t *out_mpls_m = NULL;
4521         uint32_t *out_mpls_v = NULL;
4522         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4523         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4524         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4525                                      misc_parameters_2);
4526         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4527         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4528         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4529
4530         switch (prev_layer) {
4531         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4532                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4533                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4534                          MLX5_UDP_PORT_MPLS);
4535                 break;
4536         case MLX5_FLOW_LAYER_GRE:
4537                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4538                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4539                          RTE_ETHER_TYPE_MPLS);
4540                 break;
4541         default:
4542                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4543                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4544                          IPPROTO_MPLS);
4545                 break;
4546         }
4547         if (!in_mpls_v)
4548                 return;
4549         if (!in_mpls_m)
4550                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4551         switch (prev_layer) {
4552         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4553                 out_mpls_m =
4554                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4555                                                  outer_first_mpls_over_udp);
4556                 out_mpls_v =
4557                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4558                                                  outer_first_mpls_over_udp);
4559                 break;
4560         case MLX5_FLOW_LAYER_GRE:
4561                 out_mpls_m =
4562                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4563                                                  outer_first_mpls_over_gre);
4564                 out_mpls_v =
4565                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4566                                                  outer_first_mpls_over_gre);
4567                 break;
4568         default:
4569                 /* Inner MPLS not over GRE is not supported. */
4570                 if (!inner) {
4571                         out_mpls_m =
4572                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4573                                                          misc2_m,
4574                                                          outer_first_mpls);
4575                         out_mpls_v =
4576                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4577                                                          misc2_v,
4578                                                          outer_first_mpls);
4579                 }
4580                 break;
4581         }
4582         if (out_mpls_m && out_mpls_v) {
4583                 *out_mpls_m = *in_mpls_m;
4584                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4585         }
4586 }
4587
4588 /**
4589  * Add META item to matcher and to the value.
4590  *
4591  * @param[in, out] matcher
4592  *   Flow matcher.
4593  * @param[in, out] key
4594  *   Flow matcher value.
4595  * @param[in] item
4596  *   Flow pattern to translate.
4599  */
4600 static void
4601 flow_dv_translate_item_meta(void *matcher, void *key,
4602                             const struct rte_flow_item *item)
4603 {
4604         const struct rte_flow_item_meta *meta_m;
4605         const struct rte_flow_item_meta *meta_v;
4606         void *misc2_m =
4607                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4608         void *misc2_v =
4609                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4610
4611         meta_m = (const void *)item->mask;
4612         if (!meta_m)
4613                 meta_m = &rte_flow_item_meta_mask;
4614         meta_v = (const void *)item->spec;
4615         if (meta_v) {
4616                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4617                          rte_be_to_cpu_32(meta_m->data));
4618                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4619                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
4620         }
4621 }
4622
4623 /**
4624  * Add source vport match to the specified matcher.
4625  *
4626  * @param[in, out] matcher
4627  *   Flow matcher.
4628  * @param[in, out] key
4629  *   Flow matcher value.
4630  * @param[in] port
4631  *   Source vport value to match.
4632  * @param[in] mask
4633  *   Mask to apply.
4634  */
4635 static void
4636 flow_dv_translate_item_source_vport(void *matcher, void *key,
4637                                     int16_t port, uint16_t mask)
4638 {
4639         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4640         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4641
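        /* The source_port field identifies the originating e-switch vport. */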
4642         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4643         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4644 }
4645
4646 /**
4647  * Translate port-id item to eswitch match on port-id.
4648  *
4649  * @param[in] dev
4650  *   The device to configure through.
4651  * @param[in, out] matcher
4652  *   Flow matcher.
4653  * @param[in, out] key
4654  *   Flow matcher value.
4655  * @param[in] item
4656  *   Flow pattern to translate.
4657  *
4658  * @return
4659  *   0 on success, a negative errno value otherwise.
4660  */
4661 static int
4662 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4663                                void *key, const struct rte_flow_item *item)
4664 {
4665         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4666         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4667         uint16_t mask, val, id;
4668         int ret;
4669
4670         mask = pid_m ? pid_m->id : 0xffff;
4671         id = pid_v ? pid_v->id : dev->data->port_id;
4672         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
4673         if (ret)
4674                 return ret;
4675         flow_dv_translate_item_source_vport(matcher, key, val, mask);
4676         return 0;
4677 }
4678
4679 /**
4680  * Add ICMP6 item to matcher and to the value.
4681  *
4682  * @param[in, out] matcher
4683  *   Flow matcher.
4684  * @param[in, out] key
4685  *   Flow matcher value.
4686  * @param[in] item
4687  *   Flow pattern to translate.
4688  * @param[in] inner
4689  *   Item is inner pattern.
4690  */
4691 static void
4692 flow_dv_translate_item_icmp6(void *matcher, void *key,
4693                               const struct rte_flow_item *item,
4694                               int inner)
4695 {
4696         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4697         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4698         void *headers_m;
4699         void *headers_v;
4700         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4701                                      misc_parameters_3);
4702         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4703         if (inner) {
4704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4705                                          inner_headers);
4706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4707         } else {
4708                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4709                                          outer_headers);
4710                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4711         }
4712         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4713         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
4714         if (!icmp6_v)
4715                 return;
4716         if (!icmp6_m)
4717                 icmp6_m = &rte_flow_item_icmp6_mask;
4718         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
4719         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
4720                  icmp6_v->type & icmp6_m->type);
4721         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
4722         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
4723                  icmp6_v->code & icmp6_m->code);
4724 }
4725
4726 /**
4727  * Add ICMP item to matcher and to the value.
4728  *
4729  * @param[in, out] matcher
4730  *   Flow matcher.
4731  * @param[in, out] key
4732  *   Flow matcher value.
4733  * @param[in] item
4734  *   Flow pattern to translate.
4735  * @param[in] inner
4736  *   Item is inner pattern.
4737  */
4738 static void
4739 flow_dv_translate_item_icmp(void *matcher, void *key,
4740                             const struct rte_flow_item *item,
4741                             int inner)
4742 {
4743         const struct rte_flow_item_icmp *icmp_m = item->mask;
4744         const struct rte_flow_item_icmp *icmp_v = item->spec;
4745         void *headers_m;
4746         void *headers_v;
4747         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4748                                      misc_parameters_3);
4749         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4750         if (inner) {
4751                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4752                                          inner_headers);
4753                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4754         } else {
4755                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4756                                          outer_headers);
4757                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4758         }
4759         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4760         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
4761         if (!icmp_v)
4762                 return;
4763         if (!icmp_m)
4764                 icmp_m = &rte_flow_item_icmp_mask;
4765         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
4766                  icmp_m->hdr.icmp_type);
4767         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
4768                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
4769         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
4770                  icmp_m->hdr.icmp_code);
4771         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
4772                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
4773 }
4774
4775 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
4776
4777 #define HEADER_IS_ZERO(match_criteria, headers)                              \
4778         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
4779                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
4780
4781 /**
4782  * Calculate flow matcher enable bitmap.
4783  *
4784  * @param match_criteria
4785  *   Pointer to flow matcher criteria.
4786  *
4787  * @return
4788  *   Bitmap of enabled fields.
4789  */
4790 static uint8_t
4791 flow_dv_matcher_enable(uint32_t *match_criteria)
4792 {
4793         uint8_t match_criteria_enable;
4794
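        /* Each bit enables matching on one fte_match_param sub-header. */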
4795         match_criteria_enable =
4796                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
4797                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
4798         match_criteria_enable |=
4799                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
4800                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
4801         match_criteria_enable |=
4802                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
4803                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
4804         match_criteria_enable |=
4805                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
4806                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
4807         match_criteria_enable |=
4808                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
4809                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
4810         return match_criteria_enable;
4811 }
4812
4813
4814 /**
4815  * Get a flow table.
4816  *
4817  * @param[in, out] dev
4818  *   Pointer to rte_eth_dev structure.
4819  * @param[in] table_id
4820  *   Table id to use.
4821  * @param[in] egress
4822  *   Direction of the table.
4823  * @param[in] transfer
4824  *   E-Switch or NIC flow.
4825  * @param[out] error
4826  *   pointer to error structure.
4827  *
4828  * @return
4829  *   Returns the table resource based on the index, NULL in case of failure.
4830  */
4831 static struct mlx5_flow_tbl_resource *
4832 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
4833                          uint32_t table_id, uint8_t egress,
4834                          uint8_t transfer,
4835                          struct rte_flow_error *error)
4836 {
4837         struct mlx5_priv *priv = dev->data->dev_private;
4838         struct mlx5_ibv_shared *sh = priv->sh;
4839         struct mlx5_flow_tbl_resource *tbl;
4840
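        /*
         * With DR support the table object is created on demand and
         * reference counted; otherwise the static entry is returned as-is.
         */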
4841 #ifdef HAVE_MLX5DV_DR
4842         if (transfer) {
4843                 tbl = &sh->fdb_tbl[table_id];
4844                 if (!tbl->obj)
4845                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4846                                 (sh->fdb_domain, table_id);
4847         } else if (egress) {
4848                 tbl = &sh->tx_tbl[table_id];
4849                 if (!tbl->obj)
4850                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4851                                 (sh->tx_domain, table_id);
4852         } else {
4853                 tbl = &sh->rx_tbl[table_id];
4854                 if (!tbl->obj)
4855                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4856                                 (sh->rx_domain, table_id);
4857         }
4858         if (!tbl->obj) {
4859                 rte_flow_error_set(error, ENOMEM,
4860                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4861                                    NULL, "cannot create table");
4862                 return NULL;
4863         }
4864         rte_atomic32_inc(&tbl->refcnt);
4865         return tbl;
4866 #else
4867         (void)error;
4868         (void)tbl;
4869         if (transfer)
4870                 return &sh->fdb_tbl[table_id];
4871         else if (egress)
4872                 return &sh->tx_tbl[table_id];
4873         else
4874                 return &sh->rx_tbl[table_id];
4875 #endif
4876 }
4877
4878 /**
4879  * Release a flow table.
4880  *
4881  * @param[in] tbl
4882  *   Table resource to be released.
4883  *
4884  * @return
4885  *   Returns 0 if the table was released, 1 otherwise.
4886  */
4887 static int
4888 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
4889 {
4890         if (!tbl)
4891                 return 0;
4892         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
4893                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
4894                 tbl->obj = NULL;
4895                 return 0;
4896         }
4897         return 1;
4898 }
4899
4900 /**
4901  * Register the flow matcher.
4902  *
4903  * @param[in, out] dev
4904  *   Pointer to rte_eth_dev structure.
4905  * @param[in, out] matcher
4906  *   Pointer to flow matcher.
4907  * @param[in, out] dev_flow
4908  *   Pointer to the dev_flow.
4909  * @param[out] error
4910  *   pointer to error structure.
4911  *
4912  * @return
4913  *   0 on success, otherwise -errno and errno is set.
4914  */
4915 static int
4916 flow_dv_matcher_register(struct rte_eth_dev *dev,
4917                          struct mlx5_flow_dv_matcher *matcher,
4918                          struct mlx5_flow *dev_flow,
4919                          struct rte_flow_error *error)
4920 {
4921         struct mlx5_priv *priv = dev->data->dev_private;
4922         struct mlx5_ibv_shared *sh = priv->sh;
4923         struct mlx5_flow_dv_matcher *cache_matcher;
4924         struct mlx5dv_flow_matcher_attr dv_attr = {
4925                 .type = IBV_FLOW_ATTR_NORMAL,
4926                 .match_mask = (void *)&matcher->mask,
4927         };
4928         struct mlx5_flow_tbl_resource *tbl = NULL;
4929
4930         /* Lookup from cache. */
4931         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
4932                 if (matcher->crc == cache_matcher->crc &&
4933                     matcher->priority == cache_matcher->priority &&
4934                     matcher->egress == cache_matcher->egress &&
4935                     matcher->group == cache_matcher->group &&
4936                     matcher->transfer == cache_matcher->transfer &&
4937                     !memcmp((const void *)matcher->mask.buf,
4938                             (const void *)cache_matcher->mask.buf,
4939                             cache_matcher->mask.size)) {
4940                         DRV_LOG(DEBUG,
4941                                 "priority %hd use %s matcher %p: refcnt %d++",
4942                                 cache_matcher->priority,
4943                                 cache_matcher->egress ? "tx" : "rx",
4944                                 (void *)cache_matcher,
4945                                 rte_atomic32_read(&cache_matcher->refcnt));
4946                         rte_atomic32_inc(&cache_matcher->refcnt);
4947                         dev_flow->dv.matcher = cache_matcher;
4948                         return 0;
4949                 }
4950         }
4951         /* Register new matcher. */
4952         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
4953         if (!cache_matcher)
4954                 return rte_flow_error_set(error, ENOMEM,
4955                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4956                                           "cannot allocate matcher memory");
4957         tbl = flow_dv_tbl_resource_get(dev, matcher->group,
4958                                        matcher->egress, matcher->transfer,
4959                                        error);
4960         if (!tbl) {
4961                 rte_free(cache_matcher);
4962                 return rte_flow_error_set(error, ENOMEM,
4963                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4964                                           NULL, "cannot create table");
4965         }
4966         *cache_matcher = *matcher;
4967         dv_attr.match_criteria_enable =
4968                 flow_dv_matcher_enable(cache_matcher->mask.buf);
4969         dv_attr.priority = matcher->priority;
4970         if (matcher->egress)
4971                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
4972         cache_matcher->matcher_object =
4973                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
4974         if (!cache_matcher->matcher_object) {
4975                 rte_free(cache_matcher);
4976 #ifdef HAVE_MLX5DV_DR
4977                 flow_dv_tbl_resource_release(tbl);
4978 #endif
4979                 return rte_flow_error_set(error, ENOMEM,
4980                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4981                                           NULL, "cannot create matcher");
4982         }
4983         rte_atomic32_inc(&cache_matcher->refcnt);
4984         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
4985         dev_flow->dv.matcher = cache_matcher;
4986         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
4987                 cache_matcher->priority,
4988                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
4989                 rte_atomic32_read(&cache_matcher->refcnt));
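	/*
	 * Keep a table reference as long as the matcher exists; it is
	 * released in flow_dv_matcher_release().
	 */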
4990         rte_atomic32_inc(&tbl->refcnt);
4991         return 0;
4992 }
4993
4994 /**
4995  * Find existing tag resource or create and register a new one.
4996  *
4997  * @param[in, out] dev
4998  *   Pointer to rte_eth_dev structure.
4999  * @param[in, out] resource
5000  *   Pointer to tag resource.
5001  * @param[in, out] dev_flow
5002  *   Pointer to the dev_flow.
5003  * @param[out] error
5004  *   Pointer to error structure.
5005  *
5006  * @return
5007  *   0 on success, a negative errno value otherwise and rte_errno is set.
5008  */
5009 static int
5010 flow_dv_tag_resource_register
5011                         (struct rte_eth_dev *dev,
5012                          struct mlx5_flow_dv_tag_resource *resource,
5013                          struct mlx5_flow *dev_flow,
5014                          struct rte_flow_error *error)
5015 {
5016         struct mlx5_priv *priv = dev->data->dev_private;
5017         struct mlx5_ibv_shared *sh = priv->sh;
5018         struct mlx5_flow_dv_tag_resource *cache_resource;
5019
5020         /* Lookup a matching resource from cache. */
5021         LIST_FOREACH(cache_resource, &sh->tags, next) {
5022                 if (resource->tag == cache_resource->tag) {
5023                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
5024                                 (void *)cache_resource,
5025                                 rte_atomic32_read(&cache_resource->refcnt));
5026                         rte_atomic32_inc(&cache_resource->refcnt);
5027                         dev_flow->flow->tag_resource = cache_resource;
5028                         return 0;
5029                 }
5030         }
5031         /* Register new resource. */
5032         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
5033         if (!cache_resource)
5034                 return rte_flow_error_set(error, ENOMEM,
5035                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5036                                           "cannot allocate resource memory");
5037         *cache_resource = *resource;
5038         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
5039                 (resource->tag);
5040         if (!cache_resource->action) {
5041                 rte_free(cache_resource);
5042                 return rte_flow_error_set(error, ENOMEM,
5043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5044                                           NULL, "cannot create action");
5045         }
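	/* Initialize to zero, then take the creating flow's reference. */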
5046         rte_atomic32_init(&cache_resource->refcnt);
5047         rte_atomic32_inc(&cache_resource->refcnt);
5048         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
5049         dev_flow->flow->tag_resource = cache_resource;
5050         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
5051                 (void *)cache_resource,
5052                 rte_atomic32_read(&cache_resource->refcnt));
5053         return 0;
5054 }
5055
5056 /**
5057  * Release the tag.
5058  *
5059  * @param dev
5060  *   Pointer to Ethernet device.
5061  * @param tag
5062  *   Pointer to the tag resource to release.
5063  *
5064  * @return
5065  *   1 while a reference on it exists, 0 when freed.
5066  */
5067 static int
5068 flow_dv_tag_release(struct rte_eth_dev *dev,
5069                     struct mlx5_flow_dv_tag_resource *tag)
5070 {
5071         assert(tag);
5072         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
5073                 dev->data->port_id, (void *)tag,
5074                 rte_atomic32_read(&tag->refcnt));
5075         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
5076                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
5077                 LIST_REMOVE(tag, next);
5078                 DRV_LOG(DEBUG, "port %u tag %p: removed",
5079                         dev->data->port_id, (void *)tag);
5080                 rte_free(tag);
5081                 return 0;
5082         }
5083         return 1;
5084 }
5085
5086 /**
5087  * Translate port ID action to vport.
5088  *
5089  * @param[in] dev
5090  *   Pointer to rte_eth_dev structure.
5091  * @param[in] action
5092  *   Pointer to the port ID action.
5093  * @param[out] dst_port_id
5094  *   The target port ID.
5095  * @param[out] error
5096  *   Pointer to the error structure.
5097  *
5098  * @return
5099  *   0 on success, a negative errno value otherwise and rte_errno is set.
5100  */
5101 static int
5102 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
5103                                  const struct rte_flow_action *action,
5104                                  uint32_t *dst_port_id,
5105                                  struct rte_flow_error *error)
5106 {
5107         uint32_t port;
5108         uint16_t port_id;
5109         int ret;
5110         const struct rte_flow_action_port_id *conf =
5111                         (const struct rte_flow_action_port_id *)action->conf;
5112
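	/* "original" requests this device's own port ID over the configured one. */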
5113         port = conf->original ? dev->data->port_id : conf->id;
5114         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
5115         if (ret)
5116                 return rte_flow_error_set(error, -ret,
5117                                           RTE_FLOW_ERROR_TYPE_ACTION,
5118                                           NULL,
5119                                           "No eswitch info was found for port");
5120         *dst_port_id = port_id;
5121         return 0;
5122 }
5123
5124 /**
5125  * Fill the flow with DV spec.
5126  *
5127  * @param[in] dev
5128  *   Pointer to rte_eth_dev structure.
5129  * @param[in, out] dev_flow
5130  *   Pointer to the sub flow.
5131  * @param[in] attr
5132  *   Pointer to the flow attributes.
5133  * @param[in] items
5134  *   Pointer to the list of items.
5135  * @param[in] actions
5136  *   Pointer to the list of actions.
5137  * @param[out] error
5138  *   Pointer to the error structure.
5139  *
5140  * @return
5141  *   0 on success, a negative errno value otherwise and rte_errno is set.
5142  */
5143 static int
5144 flow_dv_translate(struct rte_eth_dev *dev,
5145                   struct mlx5_flow *dev_flow,
5146                   const struct rte_flow_attr *attr,
5147                   const struct rte_flow_item items[],
5148                   const struct rte_flow_action actions[],
5149                   struct rte_flow_error *error)
5150 {
5151         struct mlx5_priv *priv = dev->data->dev_private;
5152         struct rte_flow *flow = dev_flow->flow;
5153         uint64_t item_flags = 0;
5154         uint64_t last_item = 0;
5155         uint64_t action_flags = 0;
5156         uint64_t priority = attr->priority;
5157         struct mlx5_flow_dv_matcher matcher = {
5158                 .mask = {
5159                         .size = sizeof(matcher.mask.buf),
5160                 },
5161         };
5162         int actions_n = 0;
5163         bool actions_end = false;
5164         struct mlx5_flow_dv_modify_hdr_resource res = {
5165                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
5166                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
5167         };
5168         union flow_dv_attr flow_attr = { .attr = 0 };
5169         struct mlx5_flow_dv_tag_resource tag_resource;
5170         uint32_t modify_action_position = UINT32_MAX;
5171         void *match_mask = matcher.mask.buf;
5172         void *match_value = dev_flow->dv.value.buf;
5173         uint8_t next_protocol = 0xff;
5174         struct rte_vlan_hdr vlan = { 0 };
5175         bool vlan_inherited = false;
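	/*
	 * VLAN header fields are inherited from the pattern items once,
	 * on the first VLAN-related action.
	 */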
5176         uint16_t vlan_tci;
5177         uint32_t table;
5178         int ret = 0;
5179
5180         ret = mlx5_flow_group_to_table(attr, dev_flow->external, attr->group,
5181                                        &table, error);
5182         if (ret)
5183                 return ret;
5184         flow->group = table;
5185         if (attr->transfer)
5186                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
5187         if (priority == MLX5_FLOW_PRIO_RSVD)
5188                 priority = priv->config.flow_prio - 1;
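	/*
	 * First pass: translate the action list. Modify-header actions are
	 * accumulated in "res" and emitted as one device action when the
	 * END action is reached, in the slot reserved via
	 * "modify_action_position". Pattern items are translated in a
	 * second pass below.
	 */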
5189         for (; !actions_end; actions++) {
5190                 const struct rte_flow_action_queue *queue;
5191                 const struct rte_flow_action_rss *rss;
5192                 const struct rte_flow_action *action = actions;
5193                 const struct rte_flow_action_count *count = action->conf;
5194                 const uint8_t *rss_key;
5195                 const struct rte_flow_action_jump *jump_data;
5196                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
5197                 struct mlx5_flow_tbl_resource *tbl;
5198                 uint32_t port_id = 0;
5199                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
5200
5201                 switch (actions->type) {
5202                 case RTE_FLOW_ACTION_TYPE_VOID:
5203                         break;
5204                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
5205                         if (flow_dv_translate_action_port_id(dev, action,
5206                                                              &port_id, error))
5207                                 return -rte_errno;
5208                         port_id_resource.port_id = port_id;
5209                         if (flow_dv_port_id_action_resource_register
5210                             (dev, &port_id_resource, dev_flow, error))
5211                                 return -rte_errno;
5212                         dev_flow->dv.actions[actions_n++] =
5213                                 dev_flow->dv.port_id_action->action;
5214                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
5215                         break;
5216                 case RTE_FLOW_ACTION_TYPE_FLAG:
5217                         tag_resource.tag =
5218                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
5219                         if (!flow->tag_resource)
5220                                 if (flow_dv_tag_resource_register
5221                                     (dev, &tag_resource, dev_flow, error))
5222                                         return -rte_errno;
5223                         dev_flow->dv.actions[actions_n++] =
5224                                 flow->tag_resource->action;
5225                         action_flags |= MLX5_FLOW_ACTION_FLAG;
5226                         break;
5227                 case RTE_FLOW_ACTION_TYPE_MARK:
5228                         tag_resource.tag = mlx5_flow_mark_set
5229                               (((const struct rte_flow_action_mark *)
5230                                (actions->conf))->id);
5231                         if (!flow->tag_resource)
5232                                 if (flow_dv_tag_resource_register
5233                                     (dev, &tag_resource, dev_flow, error))
5234                                         return -rte_errno;
5235                         dev_flow->dv.actions[actions_n++] =
5236                                 flow->tag_resource->action;
5237                         action_flags |= MLX5_FLOW_ACTION_MARK;
5238                         break;
5239                 case RTE_FLOW_ACTION_TYPE_DROP:
5240                         action_flags |= MLX5_FLOW_ACTION_DROP;
5241                         break;
5242                 case RTE_FLOW_ACTION_TYPE_QUEUE:
5243                         queue = actions->conf;
5244                         flow->rss.queue_num = 1;
5245                         (*flow->queue)[0] = queue->index;
5246                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
5247                         break;
5248                 case RTE_FLOW_ACTION_TYPE_RSS:
5249                         rss = actions->conf;
5250                         if (flow->queue)
5251                                 memcpy((*flow->queue), rss->queue,
5252                                        rss->queue_num * sizeof(uint16_t));
5253                         flow->rss.queue_num = rss->queue_num;
5254                         /* NULL RSS key indicates default RSS key. */
5255                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
5256                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
5257                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
5258                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
5259                         flow->rss.level = rss->level;
5260                         action_flags |= MLX5_FLOW_ACTION_RSS;
5261                         break;
5262                 case RTE_FLOW_ACTION_TYPE_COUNT:
5263                         if (!priv->config.devx) {
5264                                 rte_errno = ENOTSUP;
5265                                 goto cnt_err;
5266                         }
5267                         flow->counter = flow_dv_counter_alloc(dev,
5268                                                               count->shared,
5269                                                               count->id,
5270                                                               flow->group);
5271                         if (flow->counter == NULL)
5272                                 goto cnt_err;
5273                         dev_flow->dv.actions[actions_n++] =
5274                                 flow->counter->action;
5275                         action_flags |= MLX5_FLOW_ACTION_COUNT;
5276                         break;
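		/* Reached only via the "goto cnt_err" statements above. */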
5277 cnt_err:
5278                         if (rte_errno == ENOTSUP)
5279                                 return rte_flow_error_set
5280                                               (error, ENOTSUP,
5281                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5282                                                NULL,
5283                                                "count action not supported");
5284                         else
5285                                 return rte_flow_error_set
5286                                                 (error, rte_errno,
5287                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5288                                                  action,
5289                                                  "cannot create counter"
5290                                                   " object.");
5291                         break;
5292                 case RTE_FLOW_ACTION_TYPE_OF_POP_VLAN:
5293                         dev_flow->dv.actions[actions_n++] =
5294                                                 priv->sh->pop_vlan_action;
5295                         action_flags |= MLX5_FLOW_ACTION_OF_POP_VLAN;
5296                         break;
5297                 case RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN:
5298                         if (!vlan_inherited) {
5299                                 flow_dev_get_vlan_info_from_items(items, &vlan);
5300                                 vlan_inherited = true;
5301                         }
5302                         vlan.eth_proto = rte_be_to_cpu_16
5303                              ((((const struct rte_flow_action_of_push_vlan *)
5304                                                    actions->conf)->ethertype));
5305                         if (flow_dv_create_action_push_vlan
5306                                             (dev, attr, &vlan, dev_flow, error))
5307                                 return -rte_errno;
5308                         dev_flow->dv.actions[actions_n++] =
5309                                            dev_flow->dv.push_vlan_res->action;
5310                         action_flags |= MLX5_FLOW_ACTION_OF_PUSH_VLAN;
5311                         /* The push VLAN command also handles this VLAN_VID. */
5312                         action_flags &= ~MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5313                         break;
5314                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_PCP:
5315                         if (!vlan_inherited) {
5316                                 flow_dev_get_vlan_info_from_items(items, &vlan);
5317                                 vlan_inherited = true;
5318                         }
5319                         vlan_tci =
5320                             ((const struct rte_flow_action_of_set_vlan_pcp *)
5321                                                        actions->conf)->vlan_pcp;
5322                         vlan_tci = vlan_tci << MLX5DV_FLOW_VLAN_PCP_SHIFT;
5323                         vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_PCP_MASK;
5324                         vlan.vlan_tci |= vlan_tci;
5325                         /* Push VLAN command will use this value */
5326                         break;
5327                 case RTE_FLOW_ACTION_TYPE_OF_SET_VLAN_VID:
5328                         if (!vlan_inherited) {
5329                                 flow_dev_get_vlan_info_from_items(items, &vlan);
5330                                 vlan_inherited = true;
5331                         }
5332                         vlan.vlan_tci &= ~MLX5DV_FLOW_VLAN_VID_MASK;
5333                         vlan.vlan_tci |= rte_be_to_cpu_16
5334                             (((const struct rte_flow_action_of_set_vlan_vid *)
5335                                                      actions->conf)->vlan_vid);
5336                         /* Push VLAN command will use this value */
5337                         if (mlx5_flow_find_action
5338                                 (actions,
5339                                  RTE_FLOW_ACTION_TYPE_OF_PUSH_VLAN))
5340                                 break;
5341                         /* No VLAN push: treat this as a modify-header action. */
5342                         if (flow_dv_convert_action_modify_vlan_vid
5343                                                         (&res, actions, error))
5344                                 return -rte_errno;
5345                         action_flags |= MLX5_FLOW_ACTION_OF_SET_VLAN_VID;
5346                         break;
5347                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
5348                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
5349                         if (flow_dv_create_action_l2_encap(dev, actions,
5350                                                            dev_flow,
5351                                                            attr->transfer,
5352                                                            error))
5353                                 return -rte_errno;
5354                         dev_flow->dv.actions[actions_n++] =
5355                                 dev_flow->dv.encap_decap->verbs_action;
5356                         action_flags |= actions->type ==
5357                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
5358                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
5359                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
5360                         break;
5361                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
5362                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
5363                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
5364                                                            attr->transfer,
5365                                                            error))
5366                                 return -rte_errno;
5367                         dev_flow->dv.actions[actions_n++] =
5368                                 dev_flow->dv.encap_decap->verbs_action;
5369                         action_flags |= actions->type ==
5370                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
5371                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
5372                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
5373                         break;
5374                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
5375                         /* Handle encap with preceding decap. */
5376                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
5377                                 if (flow_dv_create_action_raw_encap
5378                                         (dev, actions, dev_flow, attr, error))
5379                                         return -rte_errno;
5380                                 dev_flow->dv.actions[actions_n++] =
5381                                         dev_flow->dv.encap_decap->verbs_action;
5382                         } else {
5383                                 /* Handle encap without preceding decap. */
5384                                 if (flow_dv_create_action_l2_encap
5385                                     (dev, actions, dev_flow, attr->transfer,
5386                                      error))
5387                                         return -rte_errno;
5388                                 dev_flow->dv.actions[actions_n++] =
5389                                         dev_flow->dv.encap_decap->verbs_action;
5390                         }
5391                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
5392                         break;
5393                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
5394                         /* Check if this decap is followed by encap. */
5395                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
5396                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
5397                                action++) {
5398                         }
5399                         /* Handle decap only if it isn't followed by encap. */
5400                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
5401                                 if (flow_dv_create_action_l2_decap
5402                                     (dev, dev_flow, attr->transfer, error))
5403                                         return -rte_errno;
5404                                 dev_flow->dv.actions[actions_n++] =
5405                                         dev_flow->dv.encap_decap->verbs_action;
5406                         }
5407                         /* If decap is followed by encap, handle it at encap. */
5408                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
5409                         break;
5410                 case RTE_FLOW_ACTION_TYPE_JUMP:
5411                         jump_data = action->conf;
5412                         ret = mlx5_flow_group_to_table(attr, dev_flow->external,
5413                                                        jump_data->group, &table,
5414                                                        error);
5415                         if (ret)
5416                                 return ret;
5417                         tbl = flow_dv_tbl_resource_get(dev, table,
5418                                                        attr->egress,
5419                                                        attr->transfer, error);
5420                         if (!tbl)
5421                                 return rte_flow_error_set
5422                                                 (error, errno,
5423                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5424                                                  NULL,
5425                                                  "cannot create jump action.");
5426                         jump_tbl_resource.tbl = tbl;
5427                         if (flow_dv_jump_tbl_resource_register
5428                             (dev, &jump_tbl_resource, dev_flow, error)) {
5429                                 flow_dv_tbl_resource_release(tbl);
5430                                 return rte_flow_error_set
5431                                                 (error, errno,
5432                                                  RTE_FLOW_ERROR_TYPE_ACTION,
5433                                                  NULL,
5434                                                  "cannot create jump action.");
5435                         }
5436                         dev_flow->dv.actions[actions_n++] =
5437                                 dev_flow->dv.jump->action;
5438                         action_flags |= MLX5_FLOW_ACTION_JUMP;
5439                         break;
5440                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
5441                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
5442                         if (flow_dv_convert_action_modify_mac(&res, actions,
5443                                                               error))
5444                                 return -rte_errno;
5445                         action_flags |= actions->type ==
5446                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
5447                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
5448                                         MLX5_FLOW_ACTION_SET_MAC_DST;
5449                         break;
5450                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
5451                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
5452                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
5453                                                                error))
5454                                 return -rte_errno;
5455                         action_flags |= actions->type ==
5456                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
5457                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
5458                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
5459                         break;
5460                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
5461                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
5462                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
5463                                                                error))
5464                                 return -rte_errno;
5465                         action_flags |= actions->type ==
5466                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
5467                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
5468                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
5469                         break;
5470                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
5471                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
5472                         if (flow_dv_convert_action_modify_tp(&res, actions,
5473                                                              items, &flow_attr,
5474                                                              error))
5475                                 return -rte_errno;
5476                         action_flags |= actions->type ==
5477                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
5478                                         MLX5_FLOW_ACTION_SET_TP_SRC :
5479                                         MLX5_FLOW_ACTION_SET_TP_DST;
5480                         break;
5481                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
5482                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
5483                                                                   &flow_attr,
5484                                                                   error))
5485                                 return -rte_errno;
5486                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
5487                         break;
5488                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
5489                         if (flow_dv_convert_action_modify_ttl(&res, actions,
5490                                                              items, &flow_attr,
5491                                                              error))
5492                                 return -rte_errno;
5493                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
5494                         break;
5495                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
5496                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
5497                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
5498                                                                   error))
5499                                 return -rte_errno;
5500                         action_flags |= actions->type ==
5501                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
5502                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
5503                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
5504                         break;
5506                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
5507                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
5508                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
5509                                                                   error))
5510                                 return -rte_errno;
5511                         action_flags |= actions->type ==
5512                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
5513                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
5514                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
5515                         break;
5516                 case RTE_FLOW_ACTION_TYPE_END:
5517                         actions_end = true;
5518                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
5519                                 /* Create the modify-header action if needed. */
5520                                 if (flow_dv_modify_hdr_resource_register
5521                                                                 (dev, &res,
5522                                                                  dev_flow,
5523                                                                  error))
5524                                         return -rte_errno;
5525                                 dev_flow->dv.actions[modify_action_position] =
5526                                         dev_flow->dv.modify_hdr->verbs_action;
5527                         }
5528                         break;
5529                 default:
5530                         break;
5531                 }
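		/*
		 * Reserve one slot in dv.actions for the combined
		 * modify-header action the first time such an action is
		 * seen; the slot is filled at the END action above.
		 */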
5532                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
5533                     modify_action_position == UINT32_MAX)
5534                         modify_action_position = actions_n++;
5535         }
5536         dev_flow->dv.actions_n = actions_n;
5537         flow->actions = action_flags;
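	/*
	 * Second pass: translate the pattern items into the matcher
	 * mask and value buffers.
	 */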
5538         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
5539                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
5540
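		/*
		 * Once a tunnel layer has been matched, subsequent
		 * L2-L4 items are translated as inner headers.
		 */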
5541                 switch (items->type) {
5542                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
5543                         flow_dv_translate_item_port_id(dev, match_mask,
5544                                                        match_value, items);
5545                         last_item = MLX5_FLOW_ITEM_PORT_ID;
5546                         break;
5547                 case RTE_FLOW_ITEM_TYPE_ETH:
5548                         flow_dv_translate_item_eth(match_mask, match_value,
5549                                                    items, tunnel);
5550                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5551                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5552                                              MLX5_FLOW_LAYER_OUTER_L2;
5553                         break;
5554                 case RTE_FLOW_ITEM_TYPE_VLAN:
5555                         flow_dv_translate_item_vlan(dev_flow,
5556                                                     match_mask, match_value,
5557                                                     items, tunnel);
5558                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5559                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5560                                               MLX5_FLOW_LAYER_INNER_VLAN) :
5561                                              (MLX5_FLOW_LAYER_OUTER_L2 |
5562                                               MLX5_FLOW_LAYER_OUTER_VLAN);
5563                         break;
5564                 case RTE_FLOW_ITEM_TYPE_IPV4:
5565                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5566                                                   &item_flags, &tunnel);
5567                         flow_dv_translate_item_ipv4(match_mask, match_value,
5568                                                     items, tunnel, flow->group);
5569                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5570                         dev_flow->dv.hash_fields |=
5571                                 mlx5_flow_hashfields_adjust
5572                                         (dev_flow, tunnel,
5573                                          MLX5_IPV4_LAYER_TYPES,
5574                                          MLX5_IPV4_IBV_RX_HASH);
5575                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5576                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5577                         if (items->mask != NULL &&
5578                             ((const struct rte_flow_item_ipv4 *)
5579                              items->mask)->hdr.next_proto_id) {
5580                                 next_protocol =
5581                                         ((const struct rte_flow_item_ipv4 *)
5582                                          (items->spec))->hdr.next_proto_id;
5583                                 next_protocol &=
5584                                         ((const struct rte_flow_item_ipv4 *)
5585                                          (items->mask))->hdr.next_proto_id;
5586                         } else {
5587                                 /* Reset for inner layer. */
5588                                 next_protocol = 0xff;
5589                         }
5590                         break;
5591                 case RTE_FLOW_ITEM_TYPE_IPV6:
5592                         mlx5_flow_tunnel_ip_check(items, next_protocol,
5593                                                   &item_flags, &tunnel);
5594                         flow_dv_translate_item_ipv6(match_mask, match_value,
5595                                                     items, tunnel, flow->group);
5596                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5597                         dev_flow->dv.hash_fields |=
5598                                 mlx5_flow_hashfields_adjust
5599                                         (dev_flow, tunnel,
5600                                          MLX5_IPV6_LAYER_TYPES,
5601                                          MLX5_IPV6_IBV_RX_HASH);
5602                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5603                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5604                         if (items->mask != NULL &&
5605                             ((const struct rte_flow_item_ipv6 *)
5606                              items->mask)->hdr.proto) {
5607                                 next_protocol =
5608                                         ((const struct rte_flow_item_ipv6 *)
5609                                          items->spec)->hdr.proto;
5610                                 next_protocol &=
5611                                         ((const struct rte_flow_item_ipv6 *)
5612                                          items->mask)->hdr.proto;
5613                         } else {
5614                                 /* Reset for inner layer. */
5615                                 next_protocol = 0xff;
5616                         }
5617                         break;
5618                 case RTE_FLOW_ITEM_TYPE_TCP:
5619                         flow_dv_translate_item_tcp(match_mask, match_value,
5620                                                    items, tunnel);
5621                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5622                         dev_flow->dv.hash_fields |=
5623                                 mlx5_flow_hashfields_adjust
5624                                         (dev_flow, tunnel, ETH_RSS_TCP,
5625                                          IBV_RX_HASH_SRC_PORT_TCP |
5626                                          IBV_RX_HASH_DST_PORT_TCP);
5627                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5628                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5629                         break;
5630                 case RTE_FLOW_ITEM_TYPE_UDP:
5631                         flow_dv_translate_item_udp(match_mask, match_value,
5632                                                    items, tunnel);
5633                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5634                         dev_flow->dv.hash_fields |=
5635                                 mlx5_flow_hashfields_adjust
5636                                         (dev_flow, tunnel, ETH_RSS_UDP,
5637                                          IBV_RX_HASH_SRC_PORT_UDP |
5638                                          IBV_RX_HASH_DST_PORT_UDP);
5639                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5640                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5641                         break;
5642                 case RTE_FLOW_ITEM_TYPE_GRE:
5643                         flow_dv_translate_item_gre(match_mask, match_value,
5644                                                    items, tunnel);
5645                         last_item = MLX5_FLOW_LAYER_GRE;
5646                         break;
5647                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5648                         flow_dv_translate_item_gre_key(match_mask,
5649                                                        match_value, items);
5650                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5651                         break;
5652                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5653                         flow_dv_translate_item_nvgre(match_mask, match_value,
5654                                                      items, tunnel);
5655                         last_item = MLX5_FLOW_LAYER_GRE;
5656                         break;
5657                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5658                         flow_dv_translate_item_vxlan(match_mask, match_value,
5659                                                      items, tunnel);
5660                         last_item = MLX5_FLOW_LAYER_VXLAN;
5661                         break;
5662                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
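			/*
			 * VXLAN-GPE is translated with the generic VXLAN
			 * handler; only the recorded layer flag differs.
			 */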
5663                         flow_dv_translate_item_vxlan(match_mask, match_value,
5664                                                      items, tunnel);
5665                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5666                         break;
5667                 case RTE_FLOW_ITEM_TYPE_MPLS:
5668                         flow_dv_translate_item_mpls(match_mask, match_value,
5669                                                     items, last_item, tunnel);
5670                         last_item = MLX5_FLOW_LAYER_MPLS;
5671                         break;
5672                 case RTE_FLOW_ITEM_TYPE_META:
5673                         flow_dv_translate_item_meta(match_mask, match_value,
5674                                                     items);
5675                         last_item = MLX5_FLOW_ITEM_METADATA;
5676                         break;
5677                 case RTE_FLOW_ITEM_TYPE_ICMP:
5678                         flow_dv_translate_item_icmp(match_mask, match_value,
5679                                                     items, tunnel);
5680                         last_item = MLX5_FLOW_LAYER_ICMP;
5681                         break;
5682                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5683                         flow_dv_translate_item_icmp6(match_mask, match_value,
5684                                                       items, tunnel);
5685                         last_item = MLX5_FLOW_LAYER_ICMP6;
5686                         break;
5687                 default:
5688                         break;
5689                 }
5690                 item_flags |= last_item;
5691         }
5692         /*
5693          * When E-Switch mode is enabled and the traffic is ingress,
5694          * the source port must be set manually in two cases: a NIC
5695          * steering rule, and an E-Switch rule in which no port_id item
5696          * was found. In both cases the source port is set according to
5697          * the port currently in use.
5698          */
5699         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
5700             (priv->representor || priv->master)) {
5701                 if (flow_dv_translate_item_port_id(dev, match_mask,
5702                                                    match_value, NULL))
5703                         return -rte_errno;
5704         }
5705         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
5706                                          dev_flow->dv.value.buf));
5707         dev_flow->layers = item_flags;
5708         /* Register matcher. */
5709         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
5710                                     matcher.mask.size);
5711         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
5712                                                      matcher.priority);
5713         matcher.egress = attr->egress;
5714         matcher.group = flow->group;
5715         matcher.transfer = attr->transfer;
5716         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
5717                 return -rte_errno;
5718         return 0;
5719 }
5720
5721 /**
5722  * Apply the flow to the NIC.
5723  *
5724  * @param[in] dev
5725  *   Pointer to the Ethernet device structure.
5726  * @param[in, out] flow
5727  *   Pointer to flow structure.
5728  * @param[out] error
5729  *   Pointer to error structure.
5730  *
5731  * @return
5732  *   0 on success, a negative errno value otherwise and rte_errno is set.
5733  */
5734 static int
5735 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5736               struct rte_flow_error *error)
5737 {
5738         struct mlx5_flow_dv *dv;
5739         struct mlx5_flow *dev_flow;
5740         struct mlx5_priv *priv = dev->data->dev_private;
5741         int n;
5742         int err;
5743
5744         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5745                 dv = &dev_flow->dv;
5746                 n = dv->actions_n;
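		/*
		 * Append the fate action (drop or hash Rx queue)
		 * after the translated flow actions.
		 */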
5747                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
5748                         if (flow->transfer) {
5749                                 dv->actions[n++] = priv->sh->esw_drop_action;
5750                         } else {
5751                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
5752                                 if (!dv->hrxq) {
5753                                         rte_flow_error_set
5754                                                 (error, errno,
5755                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5756                                                  NULL,
5757                                                  "cannot get drop hash queue");
5758                                         goto error;
5759                                 }
5760                                 dv->actions[n++] = dv->hrxq->action;
5761                         }
5762                 } else if (flow->actions &
5763                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
5764                         struct mlx5_hrxq *hrxq;
5765
5766                         hrxq = mlx5_hrxq_get(dev, flow->key,
5767                                              MLX5_RSS_HASH_KEY_LEN,
5768                                              dv->hash_fields,
5769                                              (*flow->queue),
5770                                              flow->rss.queue_num);
5771                         if (!hrxq) {
5772                                 hrxq = mlx5_hrxq_new
5773                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
5774                                          dv->hash_fields, (*flow->queue),
5775                                          flow->rss.queue_num,
5776                                          !!(dev_flow->layers &
5777                                             MLX5_FLOW_LAYER_TUNNEL));
5778                         }
5779                         if (!hrxq) {
5780                                 rte_flow_error_set
5781                                         (error, rte_errno,
5782                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5783                                          "cannot get hash queue");
5784                                 goto error;
5785                         }
5786                         dv->hrxq = hrxq;
5787                         dv->actions[n++] = dv->hrxq->action;
5788                 }
5789                 dv->flow =
5790                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
5791                                                   (void *)&dv->value, n,
5792                                                   dv->actions);
5793                 if (!dv->flow) {
5794                         rte_flow_error_set(error, errno,
5795                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5796                                            NULL,
5797                                            "hardware refuses to create flow");
5798                         goto error;
5799                 }
5800                 if (priv->vmwa_context &&
5801                     dev_flow->dv.vf_vlan.tag &&
5802                     !dev_flow->dv.vf_vlan.created) {
5803                         /*
5804                          * The rule contains a VLAN pattern.
5805                          * For VFs, create a VLAN interface so that
5806                          * the hypervisor sets the correct e-Switch
5807                          * vport context.
5808                          */
5809                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
5810                 }
5811         }
5812         return 0;
5813 error:
5814         err = rte_errno; /* Save rte_errno before cleanup. */
5815         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5816                 struct mlx5_flow_dv *dv = &dev_flow->dv;
5817                 if (dv->hrxq) {
5818                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
5819                                 mlx5_hrxq_drop_release(dev);
5820                         else
5821                                 mlx5_hrxq_release(dev, dv->hrxq);
5822                         dv->hrxq = NULL;
5823                 }
5824                 if (dev_flow->dv.vf_vlan.tag &&
5825                     dev_flow->dv.vf_vlan.created)
5826                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5827         }
5828         rte_errno = err; /* Restore rte_errno. */
5829         return -rte_errno;
5830 }
5831
5832 /**
5833  * Release the flow matcher.
5834  *
5835  * @param dev
5836  *   Pointer to Ethernet device.
5837  * @param flow
5838  *   Pointer to mlx5_flow.
5839  *
5840  * @return
5841  *   1 while a reference on it exists, 0 when freed.
5842  */
5843 static int
5844 flow_dv_matcher_release(struct rte_eth_dev *dev,
5845                         struct mlx5_flow *flow)
5846 {
5847         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
5848         struct mlx5_priv *priv = dev->data->dev_private;
5849         struct mlx5_ibv_shared *sh = priv->sh;
5850         struct mlx5_flow_tbl_resource *tbl;
5851
5852         assert(matcher->matcher_object);
5853         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
5854                 dev->data->port_id, (void *)matcher,
5855                 rte_atomic32_read(&matcher->refcnt));
5856         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
5857                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
5858                            (matcher->matcher_object));
5859                 LIST_REMOVE(matcher, next);
5860                 if (matcher->egress)
5861                         tbl = &sh->tx_tbl[matcher->group];
5862                 else
5863                         tbl = &sh->rx_tbl[matcher->group];
5864                 flow_dv_tbl_resource_release(tbl);
5865                 rte_free(matcher);
5866                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
5867                         dev->data->port_id, (void *)matcher);
5868                 return 0;
5869         }
5870         return 1;
5871 }
5872
5873 /**
5874  * Release an encap/decap resource.
5875  *
5876  * @param flow
5877  *   Pointer to mlx5_flow.
5878  *
5879  * @return
5880  *   1 while a reference on it exists, 0 when freed.
5881  */
5882 static int
5883 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
5884 {
5885         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
5886                                                 flow->dv.encap_decap;
5887
5888         assert(cache_resource->verbs_action);
5889         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
5890                 (void *)cache_resource,
5891                 rte_atomic32_read(&cache_resource->refcnt));
5892         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5893                 claim_zero(mlx5_glue->destroy_flow_action
5894                                 (cache_resource->verbs_action));
5895                 LIST_REMOVE(cache_resource, next);
5896                 rte_free(cache_resource);
5897                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
5898                         (void *)cache_resource);
5899                 return 0;
5900         }
5901         return 1;
5902 }
5903
5904 /**
5905  * Release a jump-to-table action resource.
5906  *
5907  * @param flow
5908  *   Pointer to mlx5_flow.
5909  *
5910  * @return
5911  *   1 while a reference on it exists, 0 when freed.
5912  */
5913 static int
5914 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
5915 {
5916         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
5917                                                 flow->dv.jump;
5918
5919         assert(cache_resource->action);
5920         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
5921                 (void *)cache_resource,
5922                 rte_atomic32_read(&cache_resource->refcnt));
5923         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5924                 claim_zero(mlx5_glue->destroy_flow_action
5925                                 (cache_resource->action));
5926                 LIST_REMOVE(cache_resource, next);
5927                 flow_dv_tbl_resource_release(cache_resource->tbl);
5928                 rte_free(cache_resource);
5929                 DRV_LOG(DEBUG, "jump table resource %p: removed",
5930                         (void *)cache_resource);
5931                 return 0;
5932         }
5933         return 1;
5934 }
5935
5936 /**
5937  * Release a modify-header resource.
5938  *
5939  * @param flow
5940  *   Pointer to mlx5_flow.
5941  *
5942  * @return
5943  *   1 while a reference on it exists, 0 when freed.
5944  */
5945 static int
5946 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
5947 {
5948         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
5949                                                 flow->dv.modify_hdr;
5950
5951         assert(cache_resource->verbs_action);
5952         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
5953                 (void *)cache_resource,
5954                 rte_atomic32_read(&cache_resource->refcnt));
5955         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5956                 claim_zero(mlx5_glue->destroy_flow_action
5957                                 (cache_resource->verbs_action));
5958                 LIST_REMOVE(cache_resource, next);
5959                 rte_free(cache_resource);
5960                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
5961                         (void *)cache_resource);
5962                 return 0;
5963         }
5964         return 1;
5965 }
5966
5967 /**
5968  * Release port ID action resource.
5969  *
5970  * @param flow
5971  *   Pointer to mlx5_flow.
5972  *
5973  * @return
5974  *   1 while a reference on it exists, 0 when freed.
5975  */
5976 static int
5977 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
5978 {
5979         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
5980                 flow->dv.port_id_action;
5981
5982         assert(cache_resource->action);
5983         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
5984                 (void *)cache_resource,
5985                 rte_atomic32_read(&cache_resource->refcnt));
5986         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5987                 claim_zero(mlx5_glue->destroy_flow_action
5988                                 (cache_resource->action));
5989                 LIST_REMOVE(cache_resource, next);
5990                 rte_free(cache_resource);
5991                 DRV_LOG(DEBUG, "port id action resource %p: removed",
5992                         (void *)cache_resource);
5993                 return 0;
5994         }
5995         return 1;
5996 }
5997
5998 /**
5999  * Release push vlan action resource.
6000  *
6001  * @param flow
6002  *   Pointer to mlx5_flow.
6003  *
6004  * @return
6005  *   1 while a reference on it exists, 0 when freed.
6006  */
6007 static int
6008 flow_dv_push_vlan_action_resource_release(struct mlx5_flow *flow)
6009 {
6010         struct mlx5_flow_dv_push_vlan_action_resource *cache_resource =
6011                 flow->dv.push_vlan_res;
6012
6013         assert(cache_resource->action);
6014         DRV_LOG(DEBUG, "push VLAN action resource %p: refcnt %d--",
6015                 (void *)cache_resource,
6016                 rte_atomic32_read(&cache_resource->refcnt));
6017         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
6018                 claim_zero(mlx5_glue->destroy_flow_action
6019                                 (cache_resource->action));
6020                 LIST_REMOVE(cache_resource, next);
6021                 rte_free(cache_resource);
6022                 DRV_LOG(DEBUG, "push vlan action resource %p: removed",
6023                         (void *)cache_resource);
6024                 return 0;
6025         }
6026         return 1;
6027 }
6028
6029 /**
6030  * Remove the flow from the NIC but keep it in memory.
6031  *
6032  * @param[in] dev
6033  *   Pointer to Ethernet device.
6034  * @param[in, out] flow
6035  *   Pointer to flow structure.
6036  */
6037 static void
6038 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
6039 {
6040         struct mlx5_flow_dv *dv;
6041         struct mlx5_flow *dev_flow;
6042
6043         if (!flow)
6044                 return;
6045         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
6046                 dv = &dev_flow->dv;
6047                 if (dv->flow) {
6048                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
6049                         dv->flow = NULL;
6050                 }
6051                 if (dv->hrxq) {
6052                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
6053                                 mlx5_hrxq_drop_release(dev);
6054                         else
6055                                 mlx5_hrxq_release(dev, dv->hrxq);
6056                         dv->hrxq = NULL;
6057                 }
6058                 if (dev_flow->dv.vf_vlan.tag &&
6059                     dev_flow->dv.vf_vlan.created)
6060                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
6061         }
6062 }
6063
6064 /**
6065  * Remove the flow from the NIC and the memory.
6066  *
6067  * @param[in] dev
6068  *   Pointer to the Ethernet device structure.
6069  * @param[in, out] flow
6070  *   Pointer to flow structure.
6071  */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	if (flow->counter) {
		flow_dv_counter_release(dev, flow->counter);
		flow->counter = NULL;
	}
	if (flow->tag_resource) {
		flow_dv_tag_release(dev, flow->tag_resource);
		flow->tag_resource = NULL;
	}
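	/*
	 * Drain the sub-flow list, dropping the reference each sub-flow
	 * holds on its cached resources before freeing it.
	 */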
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		if (dev_flow->dv.encap_decap)
			flow_dv_encap_decap_resource_release(dev_flow);
		if (dev_flow->dv.modify_hdr)
			flow_dv_modify_hdr_resource_release(dev_flow);
		if (dev_flow->dv.jump)
			flow_dv_jump_tbl_resource_release(dev_flow);
		if (dev_flow->dv.port_id_action)
			flow_dv_port_id_action_resource_release(dev_flow);
		if (dev_flow->dv.push_vlan_res)
			flow_dv_push_vlan_action_resource_release(dev_flow);
		rte_free(dev_flow);
	}
}

/**
 * Query a DV flow rule for its statistics via DevX.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
		    void *data, struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct rte_flow_query_count *qc = data;

	if (!priv->config.devx)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "counters are not supported");
	if (flow->counter) {
		uint64_t pkts, bytes;
		int err = _flow_dv_query_count(dev, flow->counter, &pkts,
					       &bytes);

		if (err)
			return rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					NULL, "cannot read counters");
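		/*
		 * The hardware counter is cumulative; report the delta since
		 * the last reset, whose absolute values are cached in
		 * flow->counter.
		 */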
		qc->hits_set = 1;
		qc->bytes_set = 1;
		qc->hits = pkts - flow->counter->hits;
		qc->bytes = bytes - flow->counter->bytes;
		if (qc->reset) {
			flow->counter->hits = pkts;
			flow->counter->bytes = bytes;
		}
		return 0;
	}
	return rte_flow_error_set(error, EINVAL,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL,
				  "counters are not available");
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev,
	      struct rte_flow *flow,
	      const struct rte_flow_action *actions,
	      void *data,
	      struct rte_flow_error *error)
{
	int ret = -EINVAL;

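	/* Only the COUNT action can be queried; reject anything else. */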
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	return ret;
}

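/*
 * The thunks below wrap the DV entry points with flow_d_shared_lock()
 * so that concurrent flow operations do not race on the cached DV
 * resources (assumed to be shared across ports on the same context).
 */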
/*
 * Mutex-protected thunk to flow_dv_translate().
 */
static int
flow_d_translate(struct rte_eth_dev *dev,
		 struct mlx5_flow *dev_flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	flow_d_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to flow_dv_apply().
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return ret;
}

/*
 * Mutex-protected thunk to flow_dv_remove().
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to flow_dv_destroy().
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
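/*
 * These callbacks are picked up by the generic mlx5 flow layer
 * (mlx5_flow.c) when the DV flow engine is selected.
 */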

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */