net/mlx5: support IP-in-IP tunnel
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
45 #endif
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
50 #endif
51
/*
 * Summary of the protocol layers found in a flow pattern.
 * Filled once by flow_dv_attr_init() and consulted by the modify-header
 * converters to pick the right header (IPv4/IPv6, UDP/TCP) to act on.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the structure has been filled. */
		uint32_t ipv4:1; /* IPv4 item present in the pattern. */
		uint32_t ipv6:1; /* IPv6 item present in the pattern. */
		uint32_t tcp:1; /* TCP item present in the pattern. */
		uint32_t udp:1; /* UDP item present in the pattern. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags accessed as one 32-bit word. */
};
63
64 /**
65  * Initialize flow attributes structure according to flow items' types.
66  *
67  * @param[in] item
68  *   Pointer to item specification.
69  * @param[out] attr
70  *   Pointer to flow attributes structure.
71  */
72 static void
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
74 {
75         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
76                 switch (item->type) {
77                 case RTE_FLOW_ITEM_TYPE_IPV4:
78                         attr->ipv4 = 1;
79                         break;
80                 case RTE_FLOW_ITEM_TYPE_IPV6:
81                         attr->ipv6 = 1;
82                         break;
83                 case RTE_FLOW_ITEM_TYPE_UDP:
84                         attr->udp = 1;
85                         break;
86                 case RTE_FLOW_ITEM_TYPE_TCP:
87                         attr->tcp = 1;
88                         break;
89                 default:
90                         break;
91                 }
92         }
93         attr->valid = 1;
94 }
95
/* Description of one header field targeted by a modify-header command. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW modification field identifier. */
};
101
/* Ethernet header fields reachable by modify-header actions.
 * Offsets are byte offsets inside the Ethernet header.
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker (size == 0). */
};
109
/* IPv4 header fields reachable by modify-header actions.
 * Offsets are byte offsets inside the IPv4 header.
 */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* End marker (size == 0). */
};
116
/* IPv6 header fields reachable by modify-header actions.
 * The 128-bit addresses are split into four 32-bit HW fields each;
 * offsets are byte offsets inside the IPv6 header.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker (size == 0). */
};
129
/* UDP header fields reachable by modify-header actions.
 * Offsets are byte offsets inside the UDP header.
 */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker (size == 0). */
};
135
/* TCP header fields reachable by modify-header actions.
 * Offsets are byte offsets inside the TCP header.
 */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0}, /* End marker (size == 0). */
};
143
144 static void
145 mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
146 {
147         uint8_t next_protocol = 0xFF;
148
149         if (item->mask != NULL) {
150                 switch (item->type) {
151                 case RTE_FLOW_ITEM_TYPE_IPV4:
152                         next_protocol =
153                                 ((const struct rte_flow_item_ipv4 *)
154                                  (item->spec))->hdr.next_proto_id;
155                         next_protocol &=
156                                 ((const struct rte_flow_item_ipv4 *)
157                                  (item->mask))->hdr.next_proto_id;
158                         break;
159                 case RTE_FLOW_ITEM_TYPE_IPV6:
160                         next_protocol =
161                                 ((const struct rte_flow_item_ipv6 *)
162                                  (item->spec))->hdr.proto;
163                         next_protocol &=
164                                 ((const struct rte_flow_item_ipv6 *)
165                                  (item->mask))->hdr.proto;
166                         break;
167                 default:
168                         break;
169                 }
170         }
171         if (next_protocol == IPPROTO_IPIP)
172                 *flags |= MLX5_FLOW_LAYER_IPIP;
173         if (next_protocol == IPPROTO_IPV6)
174                 *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
175 }
176
177 /**
178  * Acquire the synchronizing object to protect multithreaded access
179  * to shared dv context. Lock occurs only if context is actually
180  * shared, i.e. we have multiport IB device and representors are
181  * created.
182  *
183  * @param[in] dev
184  *   Pointer to the rte_eth_dev structure.
185  */
186 static void
187 flow_d_shared_lock(struct rte_eth_dev *dev)
188 {
189         struct mlx5_priv *priv = dev->data->dev_private;
190         struct mlx5_ibv_shared *sh = priv->sh;
191
192         if (sh->dv_refcnt > 1) {
193                 int ret;
194
195                 ret = pthread_mutex_lock(&sh->dv_mutex);
196                 assert(!ret);
197                 (void)ret;
198         }
199 }
200
201 static void
202 flow_d_shared_unlock(struct rte_eth_dev *dev)
203 {
204         struct mlx5_priv *priv = dev->data->dev_private;
205         struct mlx5_ibv_shared *sh = priv->sh;
206
207         if (sh->dv_refcnt > 1) {
208                 int ret;
209
210                 ret = pthread_mutex_unlock(&sh->dv_mutex);
211                 assert(!ret);
212                 (void)ret;
213         }
214 }
215
216 /**
217  * Convert modify-header action to DV specification.
218  *
219  * @param[in] item
220  *   Pointer to item specification.
221  * @param[in] field
222  *   Pointer to field modification information.
223  * @param[in,out] resource
224  *   Pointer to the modify-header resource.
225  * @param[in] type
226  *   Type of modification.
227  * @param[out] error
228  *   Pointer to the error structure.
229  *
230  * @return
231  *   0 on success, a negative errno value otherwise and rte_errno is set.
232  */
233 static int
234 flow_dv_convert_modify_action(struct rte_flow_item *item,
235                               struct field_modify_info *field,
236                               struct mlx5_flow_dv_modify_hdr_resource *resource,
237                               uint32_t type,
238                               struct rte_flow_error *error)
239 {
240         uint32_t i = resource->actions_num;
241         struct mlx5_modification_cmd *actions = resource->actions;
242         const uint8_t *spec = item->spec;
243         const uint8_t *mask = item->mask;
244         uint32_t set;
245
246         while (field->size) {
247                 set = 0;
248                 /* Generate modify command for each mask segment. */
249                 memcpy(&set, &mask[field->offset], field->size);
250                 if (set) {
251                         if (i >= MLX5_MODIFY_NUM)
252                                 return rte_flow_error_set(error, EINVAL,
253                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
254                                          "too many items to modify");
255                         actions[i].action_type = type;
256                         actions[i].field = field->id;
257                         actions[i].length = field->size ==
258                                         4 ? 0 : field->size * 8;
259                         rte_memcpy(&actions[i].data[4 - field->size],
260                                    &spec[field->offset], field->size);
261                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
262                         ++i;
263                 }
264                 if (resource->actions_num != i)
265                         resource->actions_num = i;
266                 field++;
267         }
268         if (!resource->actions_num)
269                 return rte_flow_error_set(error, EINVAL,
270                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
271                                           "invalid modification flow item");
272         return 0;
273 }
274
275 /**
276  * Convert modify-header set IPv4 address action to DV specification.
277  *
278  * @param[in,out] resource
279  *   Pointer to the modify-header resource.
280  * @param[in] action
281  *   Pointer to action specification.
282  * @param[out] error
283  *   Pointer to the error structure.
284  *
285  * @return
286  *   0 on success, a negative errno value otherwise and rte_errno is set.
287  */
288 static int
289 flow_dv_convert_action_modify_ipv4
290                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
291                          const struct rte_flow_action *action,
292                          struct rte_flow_error *error)
293 {
294         const struct rte_flow_action_set_ipv4 *conf =
295                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
296         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
297         struct rte_flow_item_ipv4 ipv4;
298         struct rte_flow_item_ipv4 ipv4_mask;
299
300         memset(&ipv4, 0, sizeof(ipv4));
301         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
302         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
303                 ipv4.hdr.src_addr = conf->ipv4_addr;
304                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
305         } else {
306                 ipv4.hdr.dst_addr = conf->ipv4_addr;
307                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
308         }
309         item.spec = &ipv4;
310         item.mask = &ipv4_mask;
311         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
312                                              MLX5_MODIFICATION_TYPE_SET, error);
313 }
314
315 /**
316  * Convert modify-header set IPv6 address action to DV specification.
317  *
318  * @param[in,out] resource
319  *   Pointer to the modify-header resource.
320  * @param[in] action
321  *   Pointer to action specification.
322  * @param[out] error
323  *   Pointer to the error structure.
324  *
325  * @return
326  *   0 on success, a negative errno value otherwise and rte_errno is set.
327  */
328 static int
329 flow_dv_convert_action_modify_ipv6
330                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
331                          const struct rte_flow_action *action,
332                          struct rte_flow_error *error)
333 {
334         const struct rte_flow_action_set_ipv6 *conf =
335                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
336         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
337         struct rte_flow_item_ipv6 ipv6;
338         struct rte_flow_item_ipv6 ipv6_mask;
339
340         memset(&ipv6, 0, sizeof(ipv6));
341         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
342         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
343                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
344                        sizeof(ipv6.hdr.src_addr));
345                 memcpy(&ipv6_mask.hdr.src_addr,
346                        &rte_flow_item_ipv6_mask.hdr.src_addr,
347                        sizeof(ipv6.hdr.src_addr));
348         } else {
349                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
350                        sizeof(ipv6.hdr.dst_addr));
351                 memcpy(&ipv6_mask.hdr.dst_addr,
352                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
353                        sizeof(ipv6.hdr.dst_addr));
354         }
355         item.spec = &ipv6;
356         item.mask = &ipv6_mask;
357         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
358                                              MLX5_MODIFICATION_TYPE_SET, error);
359 }
360
361 /**
362  * Convert modify-header set MAC address action to DV specification.
363  *
364  * @param[in,out] resource
365  *   Pointer to the modify-header resource.
366  * @param[in] action
367  *   Pointer to action specification.
368  * @param[out] error
369  *   Pointer to the error structure.
370  *
371  * @return
372  *   0 on success, a negative errno value otherwise and rte_errno is set.
373  */
374 static int
375 flow_dv_convert_action_modify_mac
376                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
377                          const struct rte_flow_action *action,
378                          struct rte_flow_error *error)
379 {
380         const struct rte_flow_action_set_mac *conf =
381                 (const struct rte_flow_action_set_mac *)(action->conf);
382         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
383         struct rte_flow_item_eth eth;
384         struct rte_flow_item_eth eth_mask;
385
386         memset(&eth, 0, sizeof(eth));
387         memset(&eth_mask, 0, sizeof(eth_mask));
388         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
389                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
390                        sizeof(eth.src.addr_bytes));
391                 memcpy(&eth_mask.src.addr_bytes,
392                        &rte_flow_item_eth_mask.src.addr_bytes,
393                        sizeof(eth_mask.src.addr_bytes));
394         } else {
395                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
396                        sizeof(eth.dst.addr_bytes));
397                 memcpy(&eth_mask.dst.addr_bytes,
398                        &rte_flow_item_eth_mask.dst.addr_bytes,
399                        sizeof(eth_mask.dst.addr_bytes));
400         }
401         item.spec = &eth;
402         item.mask = &eth_mask;
403         return flow_dv_convert_modify_action(&item, modify_eth, resource,
404                                              MLX5_MODIFICATION_TYPE_SET, error);
405 }
406
407 /**
408  * Convert modify-header set TP action to DV specification.
409  *
410  * @param[in,out] resource
411  *   Pointer to the modify-header resource.
412  * @param[in] action
413  *   Pointer to action specification.
414  * @param[in] items
415  *   Pointer to rte_flow_item objects list.
416  * @param[in] attr
417  *   Pointer to flow attributes structure.
418  * @param[out] error
419  *   Pointer to the error structure.
420  *
421  * @return
422  *   0 on success, a negative errno value otherwise and rte_errno is set.
423  */
424 static int
425 flow_dv_convert_action_modify_tp
426                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
427                          const struct rte_flow_action *action,
428                          const struct rte_flow_item *items,
429                          union flow_dv_attr *attr,
430                          struct rte_flow_error *error)
431 {
432         const struct rte_flow_action_set_tp *conf =
433                 (const struct rte_flow_action_set_tp *)(action->conf);
434         struct rte_flow_item item;
435         struct rte_flow_item_udp udp;
436         struct rte_flow_item_udp udp_mask;
437         struct rte_flow_item_tcp tcp;
438         struct rte_flow_item_tcp tcp_mask;
439         struct field_modify_info *field;
440
441         if (!attr->valid)
442                 flow_dv_attr_init(items, attr);
443         if (attr->udp) {
444                 memset(&udp, 0, sizeof(udp));
445                 memset(&udp_mask, 0, sizeof(udp_mask));
446                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
447                         udp.hdr.src_port = conf->port;
448                         udp_mask.hdr.src_port =
449                                         rte_flow_item_udp_mask.hdr.src_port;
450                 } else {
451                         udp.hdr.dst_port = conf->port;
452                         udp_mask.hdr.dst_port =
453                                         rte_flow_item_udp_mask.hdr.dst_port;
454                 }
455                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
456                 item.spec = &udp;
457                 item.mask = &udp_mask;
458                 field = modify_udp;
459         }
460         if (attr->tcp) {
461                 memset(&tcp, 0, sizeof(tcp));
462                 memset(&tcp_mask, 0, sizeof(tcp_mask));
463                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
464                         tcp.hdr.src_port = conf->port;
465                         tcp_mask.hdr.src_port =
466                                         rte_flow_item_tcp_mask.hdr.src_port;
467                 } else {
468                         tcp.hdr.dst_port = conf->port;
469                         tcp_mask.hdr.dst_port =
470                                         rte_flow_item_tcp_mask.hdr.dst_port;
471                 }
472                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
473                 item.spec = &tcp;
474                 item.mask = &tcp_mask;
475                 field = modify_tcp;
476         }
477         return flow_dv_convert_modify_action(&item, field, resource,
478                                              MLX5_MODIFICATION_TYPE_SET, error);
479 }
480
481 /**
482  * Convert modify-header set TTL action to DV specification.
483  *
484  * @param[in,out] resource
485  *   Pointer to the modify-header resource.
486  * @param[in] action
487  *   Pointer to action specification.
488  * @param[in] items
489  *   Pointer to rte_flow_item objects list.
490  * @param[in] attr
491  *   Pointer to flow attributes structure.
492  * @param[out] error
493  *   Pointer to the error structure.
494  *
495  * @return
496  *   0 on success, a negative errno value otherwise and rte_errno is set.
497  */
498 static int
499 flow_dv_convert_action_modify_ttl
500                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
501                          const struct rte_flow_action *action,
502                          const struct rte_flow_item *items,
503                          union flow_dv_attr *attr,
504                          struct rte_flow_error *error)
505 {
506         const struct rte_flow_action_set_ttl *conf =
507                 (const struct rte_flow_action_set_ttl *)(action->conf);
508         struct rte_flow_item item;
509         struct rte_flow_item_ipv4 ipv4;
510         struct rte_flow_item_ipv4 ipv4_mask;
511         struct rte_flow_item_ipv6 ipv6;
512         struct rte_flow_item_ipv6 ipv6_mask;
513         struct field_modify_info *field;
514
515         if (!attr->valid)
516                 flow_dv_attr_init(items, attr);
517         if (attr->ipv4) {
518                 memset(&ipv4, 0, sizeof(ipv4));
519                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
520                 ipv4.hdr.time_to_live = conf->ttl_value;
521                 ipv4_mask.hdr.time_to_live = 0xFF;
522                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
523                 item.spec = &ipv4;
524                 item.mask = &ipv4_mask;
525                 field = modify_ipv4;
526         }
527         if (attr->ipv6) {
528                 memset(&ipv6, 0, sizeof(ipv6));
529                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
530                 ipv6.hdr.hop_limits = conf->ttl_value;
531                 ipv6_mask.hdr.hop_limits = 0xFF;
532                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
533                 item.spec = &ipv6;
534                 item.mask = &ipv6_mask;
535                 field = modify_ipv6;
536         }
537         return flow_dv_convert_modify_action(&item, field, resource,
538                                              MLX5_MODIFICATION_TYPE_SET, error);
539 }
540
541 /**
542  * Convert modify-header decrement TTL action to DV specification.
543  *
544  * @param[in,out] resource
545  *   Pointer to the modify-header resource.
546  * @param[in] action
547  *   Pointer to action specification.
548  * @param[in] items
549  *   Pointer to rte_flow_item objects list.
550  * @param[in] attr
551  *   Pointer to flow attributes structure.
552  * @param[out] error
553  *   Pointer to the error structure.
554  *
555  * @return
556  *   0 on success, a negative errno value otherwise and rte_errno is set.
557  */
558 static int
559 flow_dv_convert_action_modify_dec_ttl
560                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
561                          const struct rte_flow_item *items,
562                          union flow_dv_attr *attr,
563                          struct rte_flow_error *error)
564 {
565         struct rte_flow_item item;
566         struct rte_flow_item_ipv4 ipv4;
567         struct rte_flow_item_ipv4 ipv4_mask;
568         struct rte_flow_item_ipv6 ipv6;
569         struct rte_flow_item_ipv6 ipv6_mask;
570         struct field_modify_info *field;
571
572         if (!attr->valid)
573                 flow_dv_attr_init(items, attr);
574         if (attr->ipv4) {
575                 memset(&ipv4, 0, sizeof(ipv4));
576                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
577                 ipv4.hdr.time_to_live = 0xFF;
578                 ipv4_mask.hdr.time_to_live = 0xFF;
579                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
580                 item.spec = &ipv4;
581                 item.mask = &ipv4_mask;
582                 field = modify_ipv4;
583         }
584         if (attr->ipv6) {
585                 memset(&ipv6, 0, sizeof(ipv6));
586                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
587                 ipv6.hdr.hop_limits = 0xFF;
588                 ipv6_mask.hdr.hop_limits = 0xFF;
589                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
590                 item.spec = &ipv6;
591                 item.mask = &ipv6_mask;
592                 field = modify_ipv6;
593         }
594         return flow_dv_convert_modify_action(&item, field, resource,
595                                              MLX5_MODIFICATION_TYPE_ADD, error);
596 }
597
598 /**
599  * Convert modify-header increment/decrement TCP Sequence number
600  * to DV specification.
601  *
602  * @param[in,out] resource
603  *   Pointer to the modify-header resource.
604  * @param[in] action
605  *   Pointer to action specification.
606  * @param[out] error
607  *   Pointer to the error structure.
608  *
609  * @return
610  *   0 on success, a negative errno value otherwise and rte_errno is set.
611  */
612 static int
613 flow_dv_convert_action_modify_tcp_seq
614                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
615                          const struct rte_flow_action *action,
616                          struct rte_flow_error *error)
617 {
618         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
619         uint64_t value = rte_be_to_cpu_32(*conf);
620         struct rte_flow_item item;
621         struct rte_flow_item_tcp tcp;
622         struct rte_flow_item_tcp tcp_mask;
623
624         memset(&tcp, 0, sizeof(tcp));
625         memset(&tcp_mask, 0, sizeof(tcp_mask));
626         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
627                 /*
628                  * The HW has no decrement operation, only increment operation.
629                  * To simulate decrement X from Y using increment operation
630                  * we need to add UINT32_MAX X times to Y.
631                  * Each adding of UINT32_MAX decrements Y by 1.
632                  */
633                 value *= UINT32_MAX;
634         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
635         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
636         item.type = RTE_FLOW_ITEM_TYPE_TCP;
637         item.spec = &tcp;
638         item.mask = &tcp_mask;
639         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
640                                              MLX5_MODIFICATION_TYPE_ADD, error);
641 }
642
643 /**
644  * Convert modify-header increment/decrement TCP Acknowledgment number
645  * to DV specification.
646  *
647  * @param[in,out] resource
648  *   Pointer to the modify-header resource.
649  * @param[in] action
650  *   Pointer to action specification.
651  * @param[out] error
652  *   Pointer to the error structure.
653  *
654  * @return
655  *   0 on success, a negative errno value otherwise and rte_errno is set.
656  */
657 static int
658 flow_dv_convert_action_modify_tcp_ack
659                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
660                          const struct rte_flow_action *action,
661                          struct rte_flow_error *error)
662 {
663         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
664         uint64_t value = rte_be_to_cpu_32(*conf);
665         struct rte_flow_item item;
666         struct rte_flow_item_tcp tcp;
667         struct rte_flow_item_tcp tcp_mask;
668
669         memset(&tcp, 0, sizeof(tcp));
670         memset(&tcp_mask, 0, sizeof(tcp_mask));
671         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
672                 /*
673                  * The HW has no decrement operation, only increment operation.
674                  * To simulate decrement X from Y using increment operation
675                  * we need to add UINT32_MAX X times to Y.
676                  * Each adding of UINT32_MAX decrements Y by 1.
677                  */
678                 value *= UINT32_MAX;
679         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
680         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
681         item.type = RTE_FLOW_ITEM_TYPE_TCP;
682         item.spec = &tcp;
683         item.mask = &tcp_mask;
684         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
685                                              MLX5_MODIFICATION_TYPE_ADD, error);
686 }
687
688 /**
689  * Validate META item.
690  *
691  * @param[in] dev
692  *   Pointer to the rte_eth_dev structure.
693  * @param[in] item
694  *   Item specification.
695  * @param[in] attr
696  *   Attributes of flow that includes this item.
697  * @param[out] error
698  *   Pointer to error structure.
699  *
700  * @return
701  *   0 on success, a negative errno value otherwise and rte_errno is set.
702  */
703 static int
704 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
705                            const struct rte_flow_item *item,
706                            const struct rte_flow_attr *attr,
707                            struct rte_flow_error *error)
708 {
709         const struct rte_flow_item_meta *spec = item->spec;
710         const struct rte_flow_item_meta *mask = item->mask;
711         const struct rte_flow_item_meta nic_mask = {
712                 .data = RTE_BE32(UINT32_MAX)
713         };
714         int ret;
715         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
716
717         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
718                 return rte_flow_error_set(error, EPERM,
719                                           RTE_FLOW_ERROR_TYPE_ITEM,
720                                           NULL,
721                                           "match on metadata offload "
722                                           "configuration is off for this port");
723         if (!spec)
724                 return rte_flow_error_set(error, EINVAL,
725                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
726                                           item->spec,
727                                           "data cannot be empty");
728         if (!spec->data)
729                 return rte_flow_error_set(error, EINVAL,
730                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
731                                           NULL,
732                                           "data cannot be zero");
733         if (!mask)
734                 mask = &rte_flow_item_meta_mask;
735         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
736                                         (const uint8_t *)&nic_mask,
737                                         sizeof(struct rte_flow_item_meta),
738                                         error);
739         if (ret < 0)
740                 return ret;
741         if (attr->ingress)
742                 return rte_flow_error_set(error, ENOTSUP,
743                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
744                                           NULL,
745                                           "pattern not supported for ingress");
746         return 0;
747 }
748
749 /**
750  * Validate vport item.
751  *
752  * @param[in] dev
753  *   Pointer to the rte_eth_dev structure.
754  * @param[in] item
755  *   Item specification.
756  * @param[in] attr
757  *   Attributes of flow that includes this item.
758  * @param[in] item_flags
759  *   Bit-fields that holds the items detected until now.
760  * @param[out] error
761  *   Pointer to error structure.
762  *
763  * @return
764  *   0 on success, a negative errno value otherwise and rte_errno is set.
765  */
766 static int
767 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
768                               const struct rte_flow_item *item,
769                               const struct rte_flow_attr *attr,
770                               uint64_t item_flags,
771                               struct rte_flow_error *error)
772 {
773         const struct rte_flow_item_port_id *spec = item->spec;
774         const struct rte_flow_item_port_id *mask = item->mask;
775         const struct rte_flow_item_port_id switch_mask = {
776                         .id = 0xffffffff,
777         };
778         uint16_t esw_domain_id;
779         uint16_t item_port_esw_domain_id;
780         int ret;
781
782         if (!attr->transfer)
783                 return rte_flow_error_set(error, EINVAL,
784                                           RTE_FLOW_ERROR_TYPE_ITEM,
785                                           NULL,
786                                           "match on port id is valid only"
787                                           " when transfer flag is enabled");
788         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
789                 return rte_flow_error_set(error, ENOTSUP,
790                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
791                                           "multiple source ports are not"
792                                           " supported");
793         if (!mask)
794                 mask = &switch_mask;
795         if (mask->id != 0xffffffff)
796                 return rte_flow_error_set(error, ENOTSUP,
797                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
798                                            mask,
799                                            "no support for partial mask on"
800                                            " \"id\" field");
801         ret = mlx5_flow_item_acceptable
802                                 (item, (const uint8_t *)mask,
803                                  (const uint8_t *)&rte_flow_item_port_id_mask,
804                                  sizeof(struct rte_flow_item_port_id),
805                                  error);
806         if (ret)
807                 return ret;
808         if (!spec)
809                 return 0;
810         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
811                                         NULL);
812         if (ret)
813                 return rte_flow_error_set(error, -ret,
814                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
815                                           "failed to obtain E-Switch info for"
816                                           " port");
817         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
818                                         &esw_domain_id, NULL);
819         if (ret < 0)
820                 return rte_flow_error_set(error, -ret,
821                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
822                                           NULL,
823                                           "failed to obtain E-Switch info");
824         if (item_port_esw_domain_id != esw_domain_id)
825                 return rte_flow_error_set(error, -ret,
826                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
827                                           "cannot match on a port from a"
828                                           " different E-Switch");
829         return 0;
830 }
831
832 /**
833  * Validate count action.
834  *
835  * @param[in] dev
836  *   device otr.
837  * @param[out] error
838  *   Pointer to error structure.
839  *
840  * @return
841  *   0 on success, a negative errno value otherwise and rte_errno is set.
842  */
843 static int
844 flow_dv_validate_action_count(struct rte_eth_dev *dev,
845                               struct rte_flow_error *error)
846 {
847         struct mlx5_priv *priv = dev->data->dev_private;
848
849         if (!priv->config.devx)
850                 goto notsup_err;
851 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
852         return 0;
853 #endif
854 notsup_err:
855         return rte_flow_error_set
856                       (error, ENOTSUP,
857                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
858                        NULL,
859                        "count action not supported");
860 }
861
862 /**
863  * Validate the L2 encap action.
864  *
865  * @param[in] action_flags
866  *   Holds the actions detected until now.
867  * @param[in] action
868  *   Pointer to the encap action.
869  * @param[in] attr
870  *   Pointer to flow attributes
871  * @param[out] error
872  *   Pointer to error structure.
873  *
874  * @return
875  *   0 on success, a negative errno value otherwise and rte_errno is set.
876  */
877 static int
878 flow_dv_validate_action_l2_encap(uint64_t action_flags,
879                                  const struct rte_flow_action *action,
880                                  const struct rte_flow_attr *attr,
881                                  struct rte_flow_error *error)
882 {
883         if (!(action->conf))
884                 return rte_flow_error_set(error, EINVAL,
885                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
886                                           "configuration cannot be null");
887         if (action_flags & MLX5_FLOW_ACTION_DROP)
888                 return rte_flow_error_set(error, EINVAL,
889                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
890                                           "can't drop and encap in same flow");
891         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
892                 return rte_flow_error_set(error, EINVAL,
893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
894                                           "can only have a single encap or"
895                                           " decap action in a flow");
896         if (!attr->transfer && attr->ingress)
897                 return rte_flow_error_set(error, ENOTSUP,
898                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
899                                           NULL,
900                                           "encap action not supported for "
901                                           "ingress");
902         return 0;
903 }
904
905 /**
906  * Validate the L2 decap action.
907  *
908  * @param[in] action_flags
909  *   Holds the actions detected until now.
910  * @param[in] attr
911  *   Pointer to flow attributes
912  * @param[out] error
913  *   Pointer to error structure.
914  *
915  * @return
916  *   0 on success, a negative errno value otherwise and rte_errno is set.
917  */
918 static int
919 flow_dv_validate_action_l2_decap(uint64_t action_flags,
920                                  const struct rte_flow_attr *attr,
921                                  struct rte_flow_error *error)
922 {
923         if (action_flags & MLX5_FLOW_ACTION_DROP)
924                 return rte_flow_error_set(error, EINVAL,
925                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
926                                           "can't drop and decap in same flow");
927         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
928                 return rte_flow_error_set(error, EINVAL,
929                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
930                                           "can only have a single encap or"
931                                           " decap action in a flow");
932         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
933                 return rte_flow_error_set(error, EINVAL,
934                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
935                                           "can't have decap action after"
936                                           " modify action");
937         if (attr->egress)
938                 return rte_flow_error_set(error, ENOTSUP,
939                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
940                                           NULL,
941                                           "decap action not supported for "
942                                           "egress");
943         return 0;
944 }
945
946 /**
947  * Validate the raw encap action.
948  *
949  * @param[in] action_flags
950  *   Holds the actions detected until now.
951  * @param[in] action
952  *   Pointer to the encap action.
953  * @param[in] attr
954  *   Pointer to flow attributes
955  * @param[out] error
956  *   Pointer to error structure.
957  *
958  * @return
959  *   0 on success, a negative errno value otherwise and rte_errno is set.
960  */
961 static int
962 flow_dv_validate_action_raw_encap(uint64_t action_flags,
963                                   const struct rte_flow_action *action,
964                                   const struct rte_flow_attr *attr,
965                                   struct rte_flow_error *error)
966 {
967         if (!(action->conf))
968                 return rte_flow_error_set(error, EINVAL,
969                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
970                                           "configuration cannot be null");
971         if (action_flags & MLX5_FLOW_ACTION_DROP)
972                 return rte_flow_error_set(error, EINVAL,
973                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
974                                           "can't drop and encap in same flow");
975         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
976                 return rte_flow_error_set(error, EINVAL,
977                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
978                                           "can only have a single encap"
979                                           " action in a flow");
980         /* encap without preceding decap is not supported for ingress */
981         if (!attr->transfer &&  attr->ingress &&
982             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
983                 return rte_flow_error_set(error, ENOTSUP,
984                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
985                                           NULL,
986                                           "encap action not supported for "
987                                           "ingress");
988         return 0;
989 }
990
991 /**
992  * Validate the raw decap action.
993  *
994  * @param[in] action_flags
995  *   Holds the actions detected until now.
996  * @param[in] action
997  *   Pointer to the encap action.
998  * @param[in] attr
999  *   Pointer to flow attributes
1000  * @param[out] error
1001  *   Pointer to error structure.
1002  *
1003  * @return
1004  *   0 on success, a negative errno value otherwise and rte_errno is set.
1005  */
1006 static int
1007 flow_dv_validate_action_raw_decap(uint64_t action_flags,
1008                                   const struct rte_flow_action *action,
1009                                   const struct rte_flow_attr *attr,
1010                                   struct rte_flow_error *error)
1011 {
1012         if (action_flags & MLX5_FLOW_ACTION_DROP)
1013                 return rte_flow_error_set(error, EINVAL,
1014                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1015                                           "can't drop and decap in same flow");
1016         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1017                 return rte_flow_error_set(error, EINVAL,
1018                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1019                                           "can't have encap action before"
1020                                           " decap action");
1021         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
1022                 return rte_flow_error_set(error, EINVAL,
1023                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1024                                           "can only have a single decap"
1025                                           " action in a flow");
1026         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
1027                 return rte_flow_error_set(error, EINVAL,
1028                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1029                                           "can't have decap action after"
1030                                           " modify action");
1031         /* decap action is valid on egress only if it is followed by encap */
1032         if (attr->egress) {
1033                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1034                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1035                        action++) {
1036                 }
1037                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1038                         return rte_flow_error_set
1039                                         (error, ENOTSUP,
1040                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1041                                          NULL, "decap action not supported"
1042                                          " for egress");
1043         }
1044         return 0;
1045 }
1046
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * Resources are reference-counted and shared between flows: if an
 * identical reformat already exists in the per-device cache, only its
 * refcount is bumped.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; on success its dv.encap_decap field points
 *   to the cached (or newly created) resource.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_domain *domain;

	/*
	 * Non-zero group means a non-root table, flags value 0; group 0
	 * gets flags 1 (see MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL fallback
	 * define at the top of the file) -- root-table actions need the
	 * root-level flag.
	 */
	resource->flags = flow->group ? 0 : 1;
	/* Select the DR domain that matches the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		/* Match on type, table, flags, and the raw reformat data. */
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Reuse the cached action: just take a reference. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the actual HW reformat action through the glue layer. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* Initial refcount is 1: this flow owns one reference. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1128
1129 /**
1130  * Find existing table jump resource or create and register a new one.
1131  *
1132  * @param dev[in, out]
1133  *   Pointer to rte_eth_dev structure.
1134  * @param[in, out] resource
1135  *   Pointer to jump table resource.
1136  * @parm[in, out] dev_flow
1137  *   Pointer to the dev_flow.
1138  * @param[out] error
1139  *   pointer to error structure.
1140  *
1141  * @return
1142  *   0 on success otherwise -errno and errno is set.
1143  */
1144 static int
1145 flow_dv_jump_tbl_resource_register
1146                         (struct rte_eth_dev *dev,
1147                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1148                          struct mlx5_flow *dev_flow,
1149                          struct rte_flow_error *error)
1150 {
1151         struct mlx5_priv *priv = dev->data->dev_private;
1152         struct mlx5_ibv_shared *sh = priv->sh;
1153         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1154
1155         /* Lookup a matching resource from cache. */
1156         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1157                 if (resource->tbl == cache_resource->tbl) {
1158                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1159                                 (void *)cache_resource,
1160                                 rte_atomic32_read(&cache_resource->refcnt));
1161                         rte_atomic32_inc(&cache_resource->refcnt);
1162                         dev_flow->dv.jump = cache_resource;
1163                         return 0;
1164                 }
1165         }
1166         /* Register new jump table resource. */
1167         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1168         if (!cache_resource)
1169                 return rte_flow_error_set(error, ENOMEM,
1170                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1171                                           "cannot allocate resource memory");
1172         *cache_resource = *resource;
1173         cache_resource->action =
1174                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1175                 (resource->tbl->obj);
1176         if (!cache_resource->action) {
1177                 rte_free(cache_resource);
1178                 return rte_flow_error_set(error, ENOMEM,
1179                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1180                                           NULL, "cannot create action");
1181         }
1182         rte_atomic32_init(&cache_resource->refcnt);
1183         rte_atomic32_inc(&cache_resource->refcnt);
1184         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1185         dev_flow->dv.jump = cache_resource;
1186         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1187                 (void *)cache_resource,
1188                 rte_atomic32_read(&cache_resource->refcnt));
1189         return 0;
1190 }
1191
1192 /**
1193  * Find existing table port ID resource or create and register a new one.
1194  *
1195  * @param dev[in, out]
1196  *   Pointer to rte_eth_dev structure.
1197  * @param[in, out] resource
1198  *   Pointer to port ID action resource.
1199  * @parm[in, out] dev_flow
1200  *   Pointer to the dev_flow.
1201  * @param[out] error
1202  *   pointer to error structure.
1203  *
1204  * @return
1205  *   0 on success otherwise -errno and errno is set.
1206  */
1207 static int
1208 flow_dv_port_id_action_resource_register
1209                         (struct rte_eth_dev *dev,
1210                          struct mlx5_flow_dv_port_id_action_resource *resource,
1211                          struct mlx5_flow *dev_flow,
1212                          struct rte_flow_error *error)
1213 {
1214         struct mlx5_priv *priv = dev->data->dev_private;
1215         struct mlx5_ibv_shared *sh = priv->sh;
1216         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1217
1218         /* Lookup a matching resource from cache. */
1219         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1220                 if (resource->port_id == cache_resource->port_id) {
1221                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1222                                 "refcnt %d++",
1223                                 (void *)cache_resource,
1224                                 rte_atomic32_read(&cache_resource->refcnt));
1225                         rte_atomic32_inc(&cache_resource->refcnt);
1226                         dev_flow->dv.port_id_action = cache_resource;
1227                         return 0;
1228                 }
1229         }
1230         /* Register new port id action resource. */
1231         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1232         if (!cache_resource)
1233                 return rte_flow_error_set(error, ENOMEM,
1234                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1235                                           "cannot allocate resource memory");
1236         *cache_resource = *resource;
1237         cache_resource->action =
1238                 mlx5_glue->dr_create_flow_action_dest_vport
1239                         (priv->sh->fdb_domain, resource->port_id);
1240         if (!cache_resource->action) {
1241                 rte_free(cache_resource);
1242                 return rte_flow_error_set(error, ENOMEM,
1243                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1244                                           NULL, "cannot create action");
1245         }
1246         rte_atomic32_init(&cache_resource->refcnt);
1247         rte_atomic32_inc(&cache_resource->refcnt);
1248         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1249         dev_flow->dv.port_id_action = cache_resource;
1250         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1251                 (void *)cache_resource,
1252                 rte_atomic32_read(&cache_resource->refcnt));
1253         return 0;
1254 }
1255
1256 /**
1257  * Get the size of specific rte_flow_item_type
1258  *
1259  * @param[in] item_type
1260  *   Tested rte_flow_item_type.
1261  *
1262  * @return
1263  *   sizeof struct item_type, 0 if void or irrelevant.
1264  */
1265 static size_t
1266 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1267 {
1268         size_t retval;
1269
1270         switch (item_type) {
1271         case RTE_FLOW_ITEM_TYPE_ETH:
1272                 retval = sizeof(struct rte_flow_item_eth);
1273                 break;
1274         case RTE_FLOW_ITEM_TYPE_VLAN:
1275                 retval = sizeof(struct rte_flow_item_vlan);
1276                 break;
1277         case RTE_FLOW_ITEM_TYPE_IPV4:
1278                 retval = sizeof(struct rte_flow_item_ipv4);
1279                 break;
1280         case RTE_FLOW_ITEM_TYPE_IPV6:
1281                 retval = sizeof(struct rte_flow_item_ipv6);
1282                 break;
1283         case RTE_FLOW_ITEM_TYPE_UDP:
1284                 retval = sizeof(struct rte_flow_item_udp);
1285                 break;
1286         case RTE_FLOW_ITEM_TYPE_TCP:
1287                 retval = sizeof(struct rte_flow_item_tcp);
1288                 break;
1289         case RTE_FLOW_ITEM_TYPE_VXLAN:
1290                 retval = sizeof(struct rte_flow_item_vxlan);
1291                 break;
1292         case RTE_FLOW_ITEM_TYPE_GRE:
1293                 retval = sizeof(struct rte_flow_item_gre);
1294                 break;
1295         case RTE_FLOW_ITEM_TYPE_NVGRE:
1296                 retval = sizeof(struct rte_flow_item_nvgre);
1297                 break;
1298         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1299                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1300                 break;
1301         case RTE_FLOW_ITEM_TYPE_MPLS:
1302                 retval = sizeof(struct rte_flow_item_mpls);
1303                 break;
1304         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1305         default:
1306                 retval = 0;
1307                 break;
1308         }
1309         return retval;
1310 }
1311
/*
 * Default header-field values used by flow_dv_convert_encap_data() when
 * the application leaves a field zero in the encapsulation items.
 */
#define MLX5_ENCAP_IPV4_VERSION		0x40	/* Version 4, in high nibble. */
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05	/* 20-byte header, no options. */
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40	/* Default TTL of 64. */
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000	/* Version 6, TC/flow 0. */
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff	/* Maximum hop limit. */
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000	/* VNI-valid flag set. */
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04	/* P bit: next-protocol present. */

1319
1320 /**
1321  * Convert the encap action data from list of rte_flow_item to raw buffer
1322  *
1323  * @param[in] items
1324  *   Pointer to rte_flow_item objects list.
1325  * @param[out] buf
1326  *   Pointer to the output buffer.
1327  * @param[out] size
1328  *   Pointer to the output buffer size.
1329  * @param[out] error
1330  *   Pointer to the error structure.
1331  *
1332  * @return
1333  *   0 on success, a negative errno value otherwise and rte_errno is set.
1334  */
1335 static int
1336 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
1337                            size_t *size, struct rte_flow_error *error)
1338 {
1339         struct rte_ether_hdr *eth = NULL;
1340         struct rte_vlan_hdr *vlan = NULL;
1341         struct rte_ipv4_hdr *ipv4 = NULL;
1342         struct rte_ipv6_hdr *ipv6 = NULL;
1343         struct rte_udp_hdr *udp = NULL;
1344         struct rte_vxlan_hdr *vxlan = NULL;
1345         struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
1346         struct rte_gre_hdr *gre = NULL;
1347         size_t len;
1348         size_t temp_size = 0;
1349
1350         if (!items)
1351                 return rte_flow_error_set(error, EINVAL,
1352                                           RTE_FLOW_ERROR_TYPE_ACTION,
1353                                           NULL, "invalid empty data");
1354         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1355                 len = flow_dv_get_item_len(items->type);
1356                 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
1357                         return rte_flow_error_set(error, EINVAL,
1358                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1359                                                   (void *)items->type,
1360                                                   "items total size is too big"
1361                                                   " for encap action");
1362                 rte_memcpy((void *)&buf[temp_size], items->spec, len);
1363                 switch (items->type) {
1364                 case RTE_FLOW_ITEM_TYPE_ETH:
1365                         eth = (struct rte_ether_hdr *)&buf[temp_size];
1366                         break;
1367                 case RTE_FLOW_ITEM_TYPE_VLAN:
1368                         vlan = (struct rte_vlan_hdr *)&buf[temp_size];
1369                         if (!eth)
1370                                 return rte_flow_error_set(error, EINVAL,
1371                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1372                                                 (void *)items->type,
1373                                                 "eth header not found");
1374                         if (!eth->ether_type)
1375                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
1376                         break;
1377                 case RTE_FLOW_ITEM_TYPE_IPV4:
1378                         ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
1379                         if (!vlan && !eth)
1380                                 return rte_flow_error_set(error, EINVAL,
1381                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1382                                                 (void *)items->type,
1383                                                 "neither eth nor vlan"
1384                                                 " header found");
1385                         if (vlan && !vlan->eth_proto)
1386                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1387                         else if (eth && !eth->ether_type)
1388                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
1389                         if (!ipv4->version_ihl)
1390                                 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
1391                                                     MLX5_ENCAP_IPV4_IHL_MIN;
1392                         if (!ipv4->time_to_live)
1393                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1394                         break;
1395                 case RTE_FLOW_ITEM_TYPE_IPV6:
1396                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1397                         if (!vlan && !eth)
1398                                 return rte_flow_error_set(error, EINVAL,
1399                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1400                                                 (void *)items->type,
1401                                                 "neither eth nor vlan"
1402                                                 " header found");
1403                         if (vlan && !vlan->eth_proto)
1404                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1405                         else if (eth && !eth->ether_type)
1406                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1407                         if (!ipv6->vtc_flow)
1408                                 ipv6->vtc_flow =
1409                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1410                         if (!ipv6->hop_limits)
1411                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1412                         break;
1413                 case RTE_FLOW_ITEM_TYPE_UDP:
1414                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1415                         if (!ipv4 && !ipv6)
1416                                 return rte_flow_error_set(error, EINVAL,
1417                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1418                                                 (void *)items->type,
1419                                                 "ip header not found");
1420                         if (ipv4 && !ipv4->next_proto_id)
1421                                 ipv4->next_proto_id = IPPROTO_UDP;
1422                         else if (ipv6 && !ipv6->proto)
1423                                 ipv6->proto = IPPROTO_UDP;
1424                         break;
1425                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1426                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1427                         if (!udp)
1428                                 return rte_flow_error_set(error, EINVAL,
1429                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1430                                                 (void *)items->type,
1431                                                 "udp header not found");
1432                         if (!udp->dst_port)
1433                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1434                         if (!vxlan->vx_flags)
1435                                 vxlan->vx_flags =
1436                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1437                         break;
1438                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1439                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1440                         if (!udp)
1441                                 return rte_flow_error_set(error, EINVAL,
1442                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1443                                                 (void *)items->type,
1444                                                 "udp header not found");
1445                         if (!vxlan_gpe->proto)
1446                                 return rte_flow_error_set(error, EINVAL,
1447                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1448                                                 (void *)items->type,
1449                                                 "next protocol not found");
1450                         if (!udp->dst_port)
1451                                 udp->dst_port =
1452                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1453                         if (!vxlan_gpe->vx_flags)
1454                                 vxlan_gpe->vx_flags =
1455                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1456                         break;
1457                 case RTE_FLOW_ITEM_TYPE_GRE:
1458                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1459                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1460                         if (!gre->proto)
1461                                 return rte_flow_error_set(error, EINVAL,
1462                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1463                                                 (void *)items->type,
1464                                                 "next protocol not found");
1465                         if (!ipv4 && !ipv6)
1466                                 return rte_flow_error_set(error, EINVAL,
1467                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1468                                                 (void *)items->type,
1469                                                 "ip header not found");
1470                         if (ipv4 && !ipv4->next_proto_id)
1471                                 ipv4->next_proto_id = IPPROTO_GRE;
1472                         else if (ipv6 && !ipv6->proto)
1473                                 ipv6->proto = IPPROTO_GRE;
1474                         break;
1475                 case RTE_FLOW_ITEM_TYPE_VOID:
1476                         break;
1477                 default:
1478                         return rte_flow_error_set(error, EINVAL,
1479                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1480                                                   (void *)items->type,
1481                                                   "unsupported item type");
1482                         break;
1483                 }
1484                 temp_size += len;
1485         }
1486         *size = temp_size;
1487         return 0;
1488 }
1489
1490 static int
1491 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1492 {
1493         struct rte_ether_hdr *eth = NULL;
1494         struct rte_vlan_hdr *vlan = NULL;
1495         struct rte_ipv6_hdr *ipv6 = NULL;
1496         struct rte_udp_hdr *udp = NULL;
1497         char *next_hdr;
1498         uint16_t proto;
1499
1500         eth = (struct rte_ether_hdr *)data;
1501         next_hdr = (char *)(eth + 1);
1502         proto = RTE_BE16(eth->ether_type);
1503
1504         /* VLAN skipping */
1505         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1506                 next_hdr += sizeof(struct rte_vlan_hdr);
1507                 vlan = (struct rte_vlan_hdr *)next_hdr;
1508                 proto = RTE_BE16(vlan->eth_proto);
1509         }
1510
1511         /* HW calculates IPv4 csum. no need to proceed */
1512         if (proto == RTE_ETHER_TYPE_IPV4)
1513                 return 0;
1514
1515         /* non IPv4/IPv6 header. not supported */
1516         if (proto != RTE_ETHER_TYPE_IPV6) {
1517                 return rte_flow_error_set(error, ENOTSUP,
1518                                           RTE_FLOW_ERROR_TYPE_ACTION,
1519                                           NULL, "Cannot offload non IPv4/IPv6");
1520         }
1521
1522         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1523
1524         /* ignore non UDP */
1525         if (ipv6->proto != IPPROTO_UDP)
1526                 return 0;
1527
1528         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1529         udp->dgram_cksum = 0;
1530
1531         return 0;
1532 }
1533
1534 /**
1535  * Convert L2 encap action to DV specification.
1536  *
1537  * @param[in] dev
1538  *   Pointer to rte_eth_dev structure.
1539  * @param[in] action
1540  *   Pointer to action structure.
1541  * @param[in, out] dev_flow
1542  *   Pointer to the mlx5_flow.
1543  * @param[in] transfer
1544  *   Mark if the flow is E-Switch flow.
1545  * @param[out] error
1546  *   Pointer to the error structure.
1547  *
1548  * @return
1549  *   0 on success, a negative errno value otherwise and rte_errno is set.
1550  */
1551 static int
1552 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1553                                const struct rte_flow_action *action,
1554                                struct mlx5_flow *dev_flow,
1555                                uint8_t transfer,
1556                                struct rte_flow_error *error)
1557 {
1558         const struct rte_flow_item *encap_data;
1559         const struct rte_flow_action_raw_encap *raw_encap_data;
1560         struct mlx5_flow_dv_encap_decap_resource res = {
1561                 .reformat_type =
1562                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1563                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1564                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1565         };
1566
1567         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1568                 raw_encap_data =
1569                         (const struct rte_flow_action_raw_encap *)action->conf;
1570                 res.size = raw_encap_data->size;
1571                 memcpy(res.buf, raw_encap_data->data, res.size);
1572                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1573                         return -rte_errno;
1574         } else {
1575                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1576                         encap_data =
1577                                 ((const struct rte_flow_action_vxlan_encap *)
1578                                                 action->conf)->definition;
1579                 else
1580                         encap_data =
1581                                 ((const struct rte_flow_action_nvgre_encap *)
1582                                                 action->conf)->definition;
1583                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1584                                                &res.size, error))
1585                         return -rte_errno;
1586         }
1587         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1588                 return rte_flow_error_set(error, EINVAL,
1589                                           RTE_FLOW_ERROR_TYPE_ACTION,
1590                                           NULL, "can't create L2 encap action");
1591         return 0;
1592 }
1593
1594 /**
1595  * Convert L2 decap action to DV specification.
1596  *
1597  * @param[in] dev
1598  *   Pointer to rte_eth_dev structure.
1599  * @param[in, out] dev_flow
1600  *   Pointer to the mlx5_flow.
1601  * @param[in] transfer
1602  *   Mark if the flow is E-Switch flow.
1603  * @param[out] error
1604  *   Pointer to the error structure.
1605  *
1606  * @return
1607  *   0 on success, a negative errno value otherwise and rte_errno is set.
1608  */
1609 static int
1610 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1611                                struct mlx5_flow *dev_flow,
1612                                uint8_t transfer,
1613                                struct rte_flow_error *error)
1614 {
1615         struct mlx5_flow_dv_encap_decap_resource res = {
1616                 .size = 0,
1617                 .reformat_type =
1618                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1619                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1620                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1621         };
1622
1623         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1624                 return rte_flow_error_set(error, EINVAL,
1625                                           RTE_FLOW_ERROR_TYPE_ACTION,
1626                                           NULL, "can't create L2 decap action");
1627         return 0;
1628 }
1629
1630 /**
1631  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1632  *
1633  * @param[in] dev
1634  *   Pointer to rte_eth_dev structure.
1635  * @param[in] action
1636  *   Pointer to action structure.
1637  * @param[in, out] dev_flow
1638  *   Pointer to the mlx5_flow.
1639  * @param[in] attr
1640  *   Pointer to the flow attributes.
1641  * @param[out] error
1642  *   Pointer to the error structure.
1643  *
1644  * @return
1645  *   0 on success, a negative errno value otherwise and rte_errno is set.
1646  */
1647 static int
1648 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1649                                 const struct rte_flow_action *action,
1650                                 struct mlx5_flow *dev_flow,
1651                                 const struct rte_flow_attr *attr,
1652                                 struct rte_flow_error *error)
1653 {
1654         const struct rte_flow_action_raw_encap *encap_data;
1655         struct mlx5_flow_dv_encap_decap_resource res;
1656
1657         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1658         res.size = encap_data->size;
1659         memcpy(res.buf, encap_data->data, res.size);
1660         res.reformat_type = attr->egress ?
1661                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1662                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1663         if (attr->transfer)
1664                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1665         else
1666                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1667                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1668         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1669                 return rte_flow_error_set(error, EINVAL,
1670                                           RTE_FLOW_ERROR_TYPE_ACTION,
1671                                           NULL, "can't create encap action");
1672         return 0;
1673 }
1674
1675 /**
1676  * Validate the modify-header actions.
1677  *
1678  * @param[in] action_flags
1679  *   Holds the actions detected until now.
1680  * @param[in] action
1681  *   Pointer to the modify action.
1682  * @param[out] error
1683  *   Pointer to error structure.
1684  *
1685  * @return
1686  *   0 on success, a negative errno value otherwise and rte_errno is set.
1687  */
1688 static int
1689 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1690                                    const struct rte_flow_action *action,
1691                                    struct rte_flow_error *error)
1692 {
1693         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1694                 return rte_flow_error_set(error, EINVAL,
1695                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1696                                           NULL, "action configuration not set");
1697         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1698                 return rte_flow_error_set(error, EINVAL,
1699                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1700                                           "can't have encap action before"
1701                                           " modify action");
1702         return 0;
1703 }
1704
1705 /**
1706  * Validate the modify-header MAC address actions.
1707  *
1708  * @param[in] action_flags
1709  *   Holds the actions detected until now.
1710  * @param[in] action
1711  *   Pointer to the modify action.
1712  * @param[in] item_flags
1713  *   Holds the items detected.
1714  * @param[out] error
1715  *   Pointer to error structure.
1716  *
1717  * @return
1718  *   0 on success, a negative errno value otherwise and rte_errno is set.
1719  */
1720 static int
1721 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1722                                    const struct rte_flow_action *action,
1723                                    const uint64_t item_flags,
1724                                    struct rte_flow_error *error)
1725 {
1726         int ret = 0;
1727
1728         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1729         if (!ret) {
1730                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1731                         return rte_flow_error_set(error, EINVAL,
1732                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1733                                                   NULL,
1734                                                   "no L2 item in pattern");
1735         }
1736         return ret;
1737 }
1738
1739 /**
1740  * Validate the modify-header IPv4 address actions.
1741  *
1742  * @param[in] action_flags
1743  *   Holds the actions detected until now.
1744  * @param[in] action
1745  *   Pointer to the modify action.
1746  * @param[in] item_flags
1747  *   Holds the items detected.
1748  * @param[out] error
1749  *   Pointer to error structure.
1750  *
1751  * @return
1752  *   0 on success, a negative errno value otherwise and rte_errno is set.
1753  */
1754 static int
1755 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1756                                     const struct rte_flow_action *action,
1757                                     const uint64_t item_flags,
1758                                     struct rte_flow_error *error)
1759 {
1760         int ret = 0;
1761
1762         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1763         if (!ret) {
1764                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1765                         return rte_flow_error_set(error, EINVAL,
1766                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1767                                                   NULL,
1768                                                   "no ipv4 item in pattern");
1769         }
1770         return ret;
1771 }
1772
1773 /**
1774  * Validate the modify-header IPv6 address actions.
1775  *
1776  * @param[in] action_flags
1777  *   Holds the actions detected until now.
1778  * @param[in] action
1779  *   Pointer to the modify action.
1780  * @param[in] item_flags
1781  *   Holds the items detected.
1782  * @param[out] error
1783  *   Pointer to error structure.
1784  *
1785  * @return
1786  *   0 on success, a negative errno value otherwise and rte_errno is set.
1787  */
1788 static int
1789 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1790                                     const struct rte_flow_action *action,
1791                                     const uint64_t item_flags,
1792                                     struct rte_flow_error *error)
1793 {
1794         int ret = 0;
1795
1796         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1797         if (!ret) {
1798                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1799                         return rte_flow_error_set(error, EINVAL,
1800                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1801                                                   NULL,
1802                                                   "no ipv6 item in pattern");
1803         }
1804         return ret;
1805 }
1806
1807 /**
1808  * Validate the modify-header TP actions.
1809  *
1810  * @param[in] action_flags
1811  *   Holds the actions detected until now.
1812  * @param[in] action
1813  *   Pointer to the modify action.
1814  * @param[in] item_flags
1815  *   Holds the items detected.
1816  * @param[out] error
1817  *   Pointer to error structure.
1818  *
1819  * @return
1820  *   0 on success, a negative errno value otherwise and rte_errno is set.
1821  */
1822 static int
1823 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1824                                   const struct rte_flow_action *action,
1825                                   const uint64_t item_flags,
1826                                   struct rte_flow_error *error)
1827 {
1828         int ret = 0;
1829
1830         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1831         if (!ret) {
1832                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1833                         return rte_flow_error_set(error, EINVAL,
1834                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1835                                                   NULL, "no transport layer "
1836                                                   "in pattern");
1837         }
1838         return ret;
1839 }
1840
1841 /**
1842  * Validate the modify-header actions of increment/decrement
1843  * TCP Sequence-number.
1844  *
1845  * @param[in] action_flags
1846  *   Holds the actions detected until now.
1847  * @param[in] action
1848  *   Pointer to the modify action.
1849  * @param[in] item_flags
1850  *   Holds the items detected.
1851  * @param[out] error
1852  *   Pointer to error structure.
1853  *
1854  * @return
1855  *   0 on success, a negative errno value otherwise and rte_errno is set.
1856  */
1857 static int
1858 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1859                                        const struct rte_flow_action *action,
1860                                        const uint64_t item_flags,
1861                                        struct rte_flow_error *error)
1862 {
1863         int ret = 0;
1864
1865         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1866         if (!ret) {
1867                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1868                         return rte_flow_error_set(error, EINVAL,
1869                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1870                                                   NULL, "no TCP item in"
1871                                                   " pattern");
1872                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1873                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1874                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1875                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1876                         return rte_flow_error_set(error, EINVAL,
1877                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1878                                                   NULL,
1879                                                   "cannot decrease and increase"
1880                                                   " TCP sequence number"
1881                                                   " at the same time");
1882         }
1883         return ret;
1884 }
1885
1886 /**
1887  * Validate the modify-header actions of increment/decrement
1888  * TCP Acknowledgment number.
1889  *
1890  * @param[in] action_flags
1891  *   Holds the actions detected until now.
1892  * @param[in] action
1893  *   Pointer to the modify action.
1894  * @param[in] item_flags
1895  *   Holds the items detected.
1896  * @param[out] error
1897  *   Pointer to error structure.
1898  *
1899  * @return
1900  *   0 on success, a negative errno value otherwise and rte_errno is set.
1901  */
1902 static int
1903 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1904                                        const struct rte_flow_action *action,
1905                                        const uint64_t item_flags,
1906                                        struct rte_flow_error *error)
1907 {
1908         int ret = 0;
1909
1910         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1911         if (!ret) {
1912                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1913                         return rte_flow_error_set(error, EINVAL,
1914                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1915                                                   NULL, "no TCP item in"
1916                                                   " pattern");
1917                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1918                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1919                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1920                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1921                         return rte_flow_error_set(error, EINVAL,
1922                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1923                                                   NULL,
1924                                                   "cannot decrease and increase"
1925                                                   " TCP acknowledgment number"
1926                                                   " at the same time");
1927         }
1928         return ret;
1929 }
1930
1931 /**
1932  * Validate the modify-header TTL actions.
1933  *
1934  * @param[in] action_flags
1935  *   Holds the actions detected until now.
1936  * @param[in] action
1937  *   Pointer to the modify action.
1938  * @param[in] item_flags
1939  *   Holds the items detected.
1940  * @param[out] error
1941  *   Pointer to error structure.
1942  *
1943  * @return
1944  *   0 on success, a negative errno value otherwise and rte_errno is set.
1945  */
1946 static int
1947 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1948                                    const struct rte_flow_action *action,
1949                                    const uint64_t item_flags,
1950                                    struct rte_flow_error *error)
1951 {
1952         int ret = 0;
1953
1954         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1955         if (!ret) {
1956                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1957                         return rte_flow_error_set(error, EINVAL,
1958                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1959                                                   NULL,
1960                                                   "no IP protocol in pattern");
1961         }
1962         return ret;
1963 }
1964
1965 /**
1966  * Validate jump action.
1967  *
1968  * @param[in] action
1969  *   Pointer to the modify action.
1970  * @param[in] group
1971  *   The group of the current flow.
1972  * @param[out] error
1973  *   Pointer to error structure.
1974  *
1975  * @return
1976  *   0 on success, a negative errno value otherwise and rte_errno is set.
1977  */
1978 static int
1979 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1980                              uint32_t group,
1981                              struct rte_flow_error *error)
1982 {
1983         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1984                 return rte_flow_error_set(error, EINVAL,
1985                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1986                                           NULL, "action configuration not set");
1987         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1988                 return rte_flow_error_set(error, EINVAL,
1989                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1990                                           "target group must be higher then"
1991                                           " the current flow group");
1992         return 0;
1993 }
1994
1995 /*
1996  * Validate the port_id action.
1997  *
1998  * @param[in] dev
1999  *   Pointer to rte_eth_dev structure.
2000  * @param[in] action_flags
2001  *   Bit-fields that holds the actions detected until now.
2002  * @param[in] action
2003  *   Port_id RTE action structure.
2004  * @param[in] attr
2005  *   Attributes of flow that includes this action.
2006  * @param[out] error
2007  *   Pointer to error structure.
2008  *
2009  * @return
2010  *   0 on success, a negative errno value otherwise and rte_errno is set.
2011  */
2012 static int
2013 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2014                                 uint64_t action_flags,
2015                                 const struct rte_flow_action *action,
2016                                 const struct rte_flow_attr *attr,
2017                                 struct rte_flow_error *error)
2018 {
2019         const struct rte_flow_action_port_id *port_id;
2020         uint16_t port;
2021         uint16_t esw_domain_id;
2022         uint16_t act_port_domain_id;
2023         int ret;
2024
2025         if (!attr->transfer)
2026                 return rte_flow_error_set(error, ENOTSUP,
2027                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2028                                           NULL,
2029                                           "port id action is valid in transfer"
2030                                           " mode only");
2031         if (!action || !action->conf)
2032                 return rte_flow_error_set(error, ENOTSUP,
2033                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2034                                           NULL,
2035                                           "port id action parameters must be"
2036                                           " specified");
2037         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2038                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2039                 return rte_flow_error_set(error, EINVAL,
2040                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2041                                           "can have only one fate actions in"
2042                                           " a flow");
2043         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2044                                         &esw_domain_id, NULL);
2045         if (ret < 0)
2046                 return rte_flow_error_set(error, -ret,
2047                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2048                                           NULL,
2049                                           "failed to obtain E-Switch info");
2050         port_id = action->conf;
2051         port = port_id->original ? dev->data->port_id : port_id->id;
2052         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2053         if (ret)
2054                 return rte_flow_error_set
2055                                 (error, -ret,
2056                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2057                                  "failed to obtain E-Switch port id for port");
2058         if (act_port_domain_id != esw_domain_id)
2059                 return rte_flow_error_set
2060                                 (error, -ret,
2061                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2062                                  "port does not belong to"
2063                                  " E-Switch being configured");
2064         return 0;
2065 }
2066
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Modify-header verbs actions are cached on the shared IB context
 * (sh->modify_cmds) and reference counted, so identical action lists
 * across flows share a single verbs action object.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_domain *ns;

	/* Pick the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	/* Flows in group 0 get the ROOT_LEVEL flag, others none. */
	resource->flags =
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		/*
		 * A hit requires identical table type, flags and an exact
		 * byte match of the action list (actions_num entries).
		 */
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, cache_resource->flags,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* Publish the new entry with an initial reference for this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
2148
2149 /**
2150  * Get or create a flow counter.
2151  *
2152  * @param[in] dev
2153  *   Pointer to the Ethernet device structure.
2154  * @param[in] shared
2155  *   Indicate if this counter is shared with other flows.
2156  * @param[in] id
2157  *   Counter identifier.
2158  *
2159  * @return
2160  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2161  */
2162 static struct mlx5_flow_counter *
2163 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
2164 {
2165         struct mlx5_priv *priv = dev->data->dev_private;
2166         struct mlx5_flow_counter *cnt = NULL;
2167         struct mlx5_devx_counter_set *dcs = NULL;
2168         int ret;
2169
2170         if (!priv->config.devx) {
2171                 ret = -ENOTSUP;
2172                 goto error_exit;
2173         }
2174         if (shared) {
2175                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
2176                         if (cnt->shared && cnt->id == id) {
2177                                 cnt->ref_cnt++;
2178                                 return cnt;
2179                         }
2180                 }
2181         }
2182         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2183         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
2184         if (!dcs || !cnt) {
2185                 ret = -ENOMEM;
2186                 goto error_exit;
2187         }
2188         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
2189         if (ret)
2190                 goto error_exit;
2191         struct mlx5_flow_counter tmpl = {
2192                 .shared = shared,
2193                 .ref_cnt = 1,
2194                 .id = id,
2195                 .dcs = dcs,
2196         };
2197         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2198         if (!tmpl.action) {
2199                 ret = errno;
2200                 goto error_exit;
2201         }
2202         *cnt = tmpl;
2203         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
2204         return cnt;
2205 error_exit:
2206         rte_free(cnt);
2207         rte_free(dcs);
2208         rte_errno = -ret;
2209         return NULL;
2210 }
2211
2212 /**
2213  * Release a flow counter.
2214  *
2215  * @param[in] counter
2216  *   Pointer to the counter handler.
2217  */
2218 static void
2219 flow_dv_counter_release(struct mlx5_flow_counter *counter)
2220 {
2221         int ret;
2222
2223         if (!counter)
2224                 return;
2225         if (--counter->ref_cnt == 0) {
2226                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
2227                 if (ret)
2228                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
2229                 LIST_REMOVE(counter, next);
2230                 rte_free(counter->dcs);
2231                 rte_free(counter);
2232         }
2233 }
2234
2235 /**
2236  * Verify the @p attributes will be correctly understood by the NIC and store
2237  * them in the @p flow if everything is correct.
2238  *
2239  * @param[in] dev
2240  *   Pointer to dev struct.
2241  * @param[in] attributes
2242  *   Pointer to flow attributes
2243  * @param[out] error
2244  *   Pointer to error structure.
2245  *
2246  * @return
2247  *   0 on success, a negative errno value otherwise and rte_errno is set.
2248  */
2249 static int
2250 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2251                             const struct rte_flow_attr *attributes,
2252                             struct rte_flow_error *error)
2253 {
2254         struct mlx5_priv *priv = dev->data->dev_private;
2255         uint32_t priority_max = priv->config.flow_prio - 1;
2256
2257 #ifndef HAVE_MLX5DV_DR
2258         if (attributes->group)
2259                 return rte_flow_error_set(error, ENOTSUP,
2260                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2261                                           NULL,
2262                                           "groups is not supported");
2263 #endif
2264         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2265             attributes->priority >= priority_max)
2266                 return rte_flow_error_set(error, ENOTSUP,
2267                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2268                                           NULL,
2269                                           "priority out of range");
2270         if (attributes->transfer) {
2271                 if (!priv->config.dv_esw_en)
2272                         return rte_flow_error_set
2273                                 (error, ENOTSUP,
2274                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2275                                  "E-Switch dr is not supported");
2276                 if (!(priv->representor || priv->master))
2277                         return rte_flow_error_set
2278                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2279                                  NULL, "E-Switch configurationd can only be"
2280                                  " done by a master or a representor device");
2281                 if (attributes->egress)
2282                         return rte_flow_error_set
2283                                 (error, ENOTSUP,
2284                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2285                                  "egress is not supported");
2286                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2287                         return rte_flow_error_set
2288                                 (error, EINVAL,
2289                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2290                                  NULL, "group must be smaller than "
2291                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2292         }
2293         if (!(attributes->egress ^ attributes->ingress))
2294                 return rte_flow_error_set(error, ENOTSUP,
2295                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2296                                           "must specify exactly one of "
2297                                           "ingress or egress");
2298         return 0;
2299 }
2300
2301 /**
2302  * Internal validation function. For validating both actions and items.
2303  *
2304  * @param[in] dev
2305  *   Pointer to the rte_eth_dev structure.
2306  * @param[in] attr
2307  *   Pointer to the flow attributes.
2308  * @param[in] items
2309  *   Pointer to the list of items.
2310  * @param[in] actions
2311  *   Pointer to the list of actions.
2312  * @param[out] error
2313  *   Pointer to the error structure.
2314  *
2315  * @return
2316  *   0 on success, a negative errno value otherwise and rte_errno is set.
2317  */
2318 static int
2319 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2320                  const struct rte_flow_item items[],
2321                  const struct rte_flow_action actions[],
2322                  struct rte_flow_error *error)
2323 {
2324         int ret;
2325         uint64_t action_flags = 0;
2326         uint64_t item_flags = 0;
2327         uint64_t last_item = 0;
2328         uint8_t next_protocol = 0xff;
2329         int actions_n = 0;
2330         const struct rte_flow_item *gre_item = NULL;
2331         struct rte_flow_item_tcp nic_tcp_mask = {
2332                 .hdr = {
2333                         .tcp_flags = 0xFF,
2334                         .src_port = RTE_BE16(UINT16_MAX),
2335                         .dst_port = RTE_BE16(UINT16_MAX),
2336                 }
2337         };
2338
2339         if (items == NULL)
2340                 return -1;
2341         ret = flow_dv_validate_attributes(dev, attr, error);
2342         if (ret < 0)
2343                 return ret;
2344         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2345                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2346                 switch (items->type) {
2347                 case RTE_FLOW_ITEM_TYPE_VOID:
2348                         break;
2349                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2350                         ret = flow_dv_validate_item_port_id
2351                                         (dev, items, attr, item_flags, error);
2352                         if (ret < 0)
2353                                 return ret;
2354                         last_item |= MLX5_FLOW_ITEM_PORT_ID;
2355                         break;
2356                 case RTE_FLOW_ITEM_TYPE_ETH:
2357                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2358                                                           error);
2359                         if (ret < 0)
2360                                 return ret;
2361                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2362                                              MLX5_FLOW_LAYER_OUTER_L2;
2363                         break;
2364                 case RTE_FLOW_ITEM_TYPE_VLAN:
2365                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2366                                                            error);
2367                         if (ret < 0)
2368                                 return ret;
2369                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2370                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2371                         break;
2372                 case RTE_FLOW_ITEM_TYPE_IPV4:
2373                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2374                                                            NULL, error);
2375                         if (ret < 0)
2376                                 return ret;
2377                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2378                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2379                         if (items->mask != NULL &&
2380                             ((const struct rte_flow_item_ipv4 *)
2381                              items->mask)->hdr.next_proto_id) {
2382                                 next_protocol =
2383                                         ((const struct rte_flow_item_ipv4 *)
2384                                          (items->spec))->hdr.next_proto_id;
2385                                 next_protocol &=
2386                                         ((const struct rte_flow_item_ipv4 *)
2387                                          (items->mask))->hdr.next_proto_id;
2388                         } else {
2389                                 /* Reset for inner layer. */
2390                                 next_protocol = 0xff;
2391                         }
2392                         mlx5_flow_tunnel_ip_check(items, &last_item);
2393                         break;
2394                 case RTE_FLOW_ITEM_TYPE_IPV6:
2395                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2396                                                            NULL, error);
2397                         if (ret < 0)
2398                                 return ret;
2399                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2400                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2401                         if (items->mask != NULL &&
2402                             ((const struct rte_flow_item_ipv6 *)
2403                              items->mask)->hdr.proto) {
2404                                 next_protocol =
2405                                         ((const struct rte_flow_item_ipv6 *)
2406                                          items->spec)->hdr.proto;
2407                                 next_protocol &=
2408                                         ((const struct rte_flow_item_ipv6 *)
2409                                          items->mask)->hdr.proto;
2410                         } else {
2411                                 /* Reset for inner layer. */
2412                                 next_protocol = 0xff;
2413                         }
2414                         mlx5_flow_tunnel_ip_check(items, &last_item);
2415                         break;
2416                 case RTE_FLOW_ITEM_TYPE_TCP:
2417                         ret = mlx5_flow_validate_item_tcp
2418                                                 (items, item_flags,
2419                                                  next_protocol,
2420                                                  &nic_tcp_mask,
2421                                                  error);
2422                         if (ret < 0)
2423                                 return ret;
2424                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2425                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2426                         break;
2427                 case RTE_FLOW_ITEM_TYPE_UDP:
2428                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2429                                                           next_protocol,
2430                                                           error);
2431                         if (ret < 0)
2432                                 return ret;
2433                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2434                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2435                         break;
2436                 case RTE_FLOW_ITEM_TYPE_GRE:
2437                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2438                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2439                                                           next_protocol, error);
2440                         if (ret < 0)
2441                                 return ret;
2442                         gre_item = items;
2443                         last_item = MLX5_FLOW_LAYER_GRE;
2444                         break;
2445                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2446                         ret = mlx5_flow_validate_item_gre_key
2447                                 (items, item_flags, gre_item, error);
2448                         if (ret < 0)
2449                                 return ret;
2450                         item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
2451                         break;
2452                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2453                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2454                                                             error);
2455                         if (ret < 0)
2456                                 return ret;
2457                         last_item = MLX5_FLOW_LAYER_VXLAN;
2458                         break;
2459                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2460                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2461                                                                 item_flags, dev,
2462                                                                 error);
2463                         if (ret < 0)
2464                                 return ret;
2465                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2466                         break;
2467                 case RTE_FLOW_ITEM_TYPE_MPLS:
2468                         ret = mlx5_flow_validate_item_mpls(dev, items,
2469                                                            item_flags,
2470                                                            last_item, error);
2471                         if (ret < 0)
2472                                 return ret;
2473                         last_item = MLX5_FLOW_LAYER_MPLS;
2474                         break;
2475                 case RTE_FLOW_ITEM_TYPE_META:
2476                         ret = flow_dv_validate_item_meta(dev, items, attr,
2477                                                          error);
2478                         if (ret < 0)
2479                                 return ret;
2480                         last_item = MLX5_FLOW_ITEM_METADATA;
2481                         break;
2482                 case RTE_FLOW_ITEM_TYPE_ICMP:
2483                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
2484                                                            next_protocol,
2485                                                            error);
2486                         if (ret < 0)
2487                                 return ret;
2488                         item_flags |= MLX5_FLOW_LAYER_ICMP;
2489                         break;
2490                 case RTE_FLOW_ITEM_TYPE_ICMP6:
2491                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
2492                                                             next_protocol,
2493                                                             error);
2494                         if (ret < 0)
2495                                 return ret;
2496                         item_flags |= MLX5_FLOW_LAYER_ICMP6;
2497                         break;
2498                 default:
2499                         return rte_flow_error_set(error, ENOTSUP,
2500                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2501                                                   NULL, "item not supported");
2502                 }
2503                 item_flags |= last_item;
2504         }
2505         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2506                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2507                         return rte_flow_error_set(error, ENOTSUP,
2508                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2509                                                   actions, "too many actions");
2510                 switch (actions->type) {
2511                 case RTE_FLOW_ACTION_TYPE_VOID:
2512                         break;
2513                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2514                         ret = flow_dv_validate_action_port_id(dev,
2515                                                               action_flags,
2516                                                               actions,
2517                                                               attr,
2518                                                               error);
2519                         if (ret)
2520                                 return ret;
2521                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2522                         ++actions_n;
2523                         break;
2524                 case RTE_FLOW_ACTION_TYPE_FLAG:
2525                         ret = mlx5_flow_validate_action_flag(action_flags,
2526                                                              attr, error);
2527                         if (ret < 0)
2528                                 return ret;
2529                         action_flags |= MLX5_FLOW_ACTION_FLAG;
2530                         ++actions_n;
2531                         break;
2532                 case RTE_FLOW_ACTION_TYPE_MARK:
2533                         ret = mlx5_flow_validate_action_mark(actions,
2534                                                              action_flags,
2535                                                              attr, error);
2536                         if (ret < 0)
2537                                 return ret;
2538                         action_flags |= MLX5_FLOW_ACTION_MARK;
2539                         ++actions_n;
2540                         break;
2541                 case RTE_FLOW_ACTION_TYPE_DROP:
2542                         ret = mlx5_flow_validate_action_drop(action_flags,
2543                                                              attr, error);
2544                         if (ret < 0)
2545                                 return ret;
2546                         action_flags |= MLX5_FLOW_ACTION_DROP;
2547                         ++actions_n;
2548                         break;
2549                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2550                         ret = mlx5_flow_validate_action_queue(actions,
2551                                                               action_flags, dev,
2552                                                               attr, error);
2553                         if (ret < 0)
2554                                 return ret;
2555                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
2556                         ++actions_n;
2557                         break;
2558                 case RTE_FLOW_ACTION_TYPE_RSS:
2559                         ret = mlx5_flow_validate_action_rss(actions,
2560                                                             action_flags, dev,
2561                                                             attr, item_flags,
2562                                                             error);
2563                         if (ret < 0)
2564                                 return ret;
2565                         action_flags |= MLX5_FLOW_ACTION_RSS;
2566                         ++actions_n;
2567                         break;
2568                 case RTE_FLOW_ACTION_TYPE_COUNT:
2569                         ret = flow_dv_validate_action_count(dev, error);
2570                         if (ret < 0)
2571                                 return ret;
2572                         action_flags |= MLX5_FLOW_ACTION_COUNT;
2573                         ++actions_n;
2574                         break;
2575                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2576                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2577                         ret = flow_dv_validate_action_l2_encap(action_flags,
2578                                                                actions, attr,
2579                                                                error);
2580                         if (ret < 0)
2581                                 return ret;
2582                         action_flags |= actions->type ==
2583                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2584                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2585                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2586                         ++actions_n;
2587                         break;
2588                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2589                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2590                         ret = flow_dv_validate_action_l2_decap(action_flags,
2591                                                                attr, error);
2592                         if (ret < 0)
2593                                 return ret;
2594                         action_flags |= actions->type ==
2595                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2596                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2597                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2598                         ++actions_n;
2599                         break;
2600                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2601                         ret = flow_dv_validate_action_raw_encap(action_flags,
2602                                                                 actions, attr,
2603                                                                 error);
2604                         if (ret < 0)
2605                                 return ret;
2606                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2607                         ++actions_n;
2608                         break;
2609                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2610                         ret = flow_dv_validate_action_raw_decap(action_flags,
2611                                                                 actions, attr,
2612                                                                 error);
2613                         if (ret < 0)
2614                                 return ret;
2615                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2616                         ++actions_n;
2617                         break;
2618                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2619                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2620                         ret = flow_dv_validate_action_modify_mac(action_flags,
2621                                                                  actions,
2622                                                                  item_flags,
2623                                                                  error);
2624                         if (ret < 0)
2625                                 return ret;
2626                         /* Count all modify-header actions as one action. */
2627                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2628                                 ++actions_n;
2629                         action_flags |= actions->type ==
2630                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2631                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2632                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2633                         break;
2634
2635                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2636                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2637                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2638                                                                   actions,
2639                                                                   item_flags,
2640                                                                   error);
2641                         if (ret < 0)
2642                                 return ret;
2643                         /* Count all modify-header actions as one action. */
2644                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2645                                 ++actions_n;
2646                         action_flags |= actions->type ==
2647                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2648                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2649                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2650                         break;
2651                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2652                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2653                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2654                                                                   actions,
2655                                                                   item_flags,
2656                                                                   error);
2657                         if (ret < 0)
2658                                 return ret;
2659                         /* Count all modify-header actions as one action. */
2660                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2661                                 ++actions_n;
2662                         action_flags |= actions->type ==
2663                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2664                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2665                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2666                         break;
2667                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2668                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2669                         ret = flow_dv_validate_action_modify_tp(action_flags,
2670                                                                 actions,
2671                                                                 item_flags,
2672                                                                 error);
2673                         if (ret < 0)
2674                                 return ret;
2675                         /* Count all modify-header actions as one action. */
2676                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2677                                 ++actions_n;
2678                         action_flags |= actions->type ==
2679                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2680                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2681                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2682                         break;
2683                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2684                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2685                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2686                                                                  actions,
2687                                                                  item_flags,
2688                                                                  error);
2689                         if (ret < 0)
2690                                 return ret;
2691                         /* Count all modify-header actions as one action. */
2692                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2693                                 ++actions_n;
2694                         action_flags |= actions->type ==
2695                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2696                                                 MLX5_FLOW_ACTION_SET_TTL :
2697                                                 MLX5_FLOW_ACTION_DEC_TTL;
2698                         break;
2699                 case RTE_FLOW_ACTION_TYPE_JUMP:
2700                         ret = flow_dv_validate_action_jump(actions,
2701                                                            attr->group, error);
2702                         if (ret)
2703                                 return ret;
2704                         ++actions_n;
2705                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2706                         break;
2707                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
2708                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
2709                         ret = flow_dv_validate_action_modify_tcp_seq
2710                                                                 (action_flags,
2711                                                                  actions,
2712                                                                  item_flags,
2713                                                                  error);
2714                         if (ret < 0)
2715                                 return ret;
2716                         /* Count all modify-header actions as one action. */
2717                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2718                                 ++actions_n;
2719                         action_flags |= actions->type ==
2720                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
2721                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
2722                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
2723                         break;
2724                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
2725                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
2726                         ret = flow_dv_validate_action_modify_tcp_ack
2727                                                                 (action_flags,
2728                                                                  actions,
2729                                                                  item_flags,
2730                                                                  error);
2731                         if (ret < 0)
2732                                 return ret;
2733                         /* Count all modify-header actions as one action. */
2734                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2735                                 ++actions_n;
2736                         action_flags |= actions->type ==
2737                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
2738                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
2739                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
2740                         break;
2741                 default:
2742                         return rte_flow_error_set(error, ENOTSUP,
2743                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2744                                                   actions,
2745                                                   "action not supported");
2746                 }
2747         }
2748         /* Eswitch has few restrictions on using items and actions */
2749         if (attr->transfer) {
2750                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2751                         return rte_flow_error_set(error, ENOTSUP,
2752                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2753                                                   NULL,
2754                                                   "unsupported action FLAG");
2755                 if (action_flags & MLX5_FLOW_ACTION_MARK)
2756                         return rte_flow_error_set(error, ENOTSUP,
2757                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2758                                                   NULL,
2759                                                   "unsupported action MARK");
2760                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2761                         return rte_flow_error_set(error, ENOTSUP,
2762                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2763                                                   NULL,
2764                                                   "unsupported action QUEUE");
2765                 if (action_flags & MLX5_FLOW_ACTION_RSS)
2766                         return rte_flow_error_set(error, ENOTSUP,
2767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2768                                                   NULL,
2769                                                   "unsupported action RSS");
2770                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2771                         return rte_flow_error_set(error, EINVAL,
2772                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2773                                                   actions,
2774                                                   "no fate action is found");
2775         } else {
2776                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2777                         return rte_flow_error_set(error, EINVAL,
2778                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2779                                                   actions,
2780                                                   "no fate action is found");
2781         }
2782         return 0;
2783 }
2784
2785 /**
2786  * Internal preparation function. Allocates the DV flow size,
2787  * this size is constant.
2788  *
2789  * @param[in] attr
2790  *   Pointer to the flow attributes.
2791  * @param[in] items
2792  *   Pointer to the list of items.
2793  * @param[in] actions
2794  *   Pointer to the list of actions.
2795  * @param[out] error
2796  *   Pointer to the error structure.
2797  *
2798  * @return
2799  *   Pointer to mlx5_flow object on success,
2800  *   otherwise NULL and rte_errno is set.
2801  */
2802 static struct mlx5_flow *
2803 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2804                 const struct rte_flow_item items[] __rte_unused,
2805                 const struct rte_flow_action actions[] __rte_unused,
2806                 struct rte_flow_error *error)
2807 {
2808         uint32_t size = sizeof(struct mlx5_flow);
2809         struct mlx5_flow *flow;
2810
2811         flow = rte_calloc(__func__, 1, size, 0);
2812         if (!flow) {
2813                 rte_flow_error_set(error, ENOMEM,
2814                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2815                                    "not enough memory to create flow");
2816                 return NULL;
2817         }
2818         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2819         return flow;
2820 }
2821
2822 #ifndef NDEBUG
2823 /**
2824  * Sanity check for match mask and value. Similar to check_valid_spec() in
2825  * kernel driver. If unmasked bit is present in value, it returns failure.
2826  *
2827  * @param match_mask
2828  *   pointer to match mask buffer.
2829  * @param match_value
2830  *   pointer to match value buffer.
2831  *
2832  * @return
2833  *   0 if valid, -EINVAL otherwise.
2834  */
2835 static int
2836 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2837 {
2838         uint8_t *m = match_mask;
2839         uint8_t *v = match_value;
2840         unsigned int i;
2841
2842         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2843                 if (v[i] & ~m[i]) {
2844                         DRV_LOG(ERR,
2845                                 "match_value differs from match_criteria"
2846                                 " %p[%u] != %p[%u]",
2847                                 match_value, i, match_mask, i);
2848                         return -EINVAL;
2849                 }
2850         }
2851         return 0;
2852 }
2853 #endif
2854
2855 /**
2856  * Add Ethernet item to matcher and to the value.
2857  *
2858  * @param[in, out] matcher
2859  *   Flow matcher.
2860  * @param[in, out] key
2861  *   Flow matcher value.
2862  * @param[in] item
2863  *   Flow pattern to translate.
2864  * @param[in] inner
2865  *   Item is inner pattern.
2866  */
2867 static void
2868 flow_dv_translate_item_eth(void *matcher, void *key,
2869                            const struct rte_flow_item *item, int inner)
2870 {
2871         const struct rte_flow_item_eth *eth_m = item->mask;
2872         const struct rte_flow_item_eth *eth_v = item->spec;
2873         const struct rte_flow_item_eth nic_mask = {
2874                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2875                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2876                 .type = RTE_BE16(0xffff),
2877         };
2878         void *headers_m;
2879         void *headers_v;
2880         char *l24_v;
2881         unsigned int i;
2882
2883         if (!eth_v)
2884                 return;
2885         if (!eth_m)
2886                 eth_m = &nic_mask;
2887         if (inner) {
2888                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2889                                          inner_headers);
2890                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2891         } else {
2892                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2893                                          outer_headers);
2894                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2895         }
2896         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2897                &eth_m->dst, sizeof(eth_m->dst));
2898         /* The value must be in the range of the mask. */
2899         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2900         for (i = 0; i < sizeof(eth_m->dst); ++i)
2901                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2902         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2903                &eth_m->src, sizeof(eth_m->src));
2904         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2905         /* The value must be in the range of the mask. */
2906         for (i = 0; i < sizeof(eth_m->dst); ++i)
2907                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2908         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2909                  rte_be_to_cpu_16(eth_m->type));
2910         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2911         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2912 }
2913
2914 /**
2915  * Add VLAN item to matcher and to the value.
2916  *
2917  * @param[in, out] matcher
2918  *   Flow matcher.
2919  * @param[in, out] key
2920  *   Flow matcher value.
2921  * @param[in] item
2922  *   Flow pattern to translate.
2923  * @param[in] inner
2924  *   Item is inner pattern.
2925  */
2926 static void
2927 flow_dv_translate_item_vlan(void *matcher, void *key,
2928                             const struct rte_flow_item *item,
2929                             int inner)
2930 {
2931         const struct rte_flow_item_vlan *vlan_m = item->mask;
2932         const struct rte_flow_item_vlan *vlan_v = item->spec;
2933         const struct rte_flow_item_vlan nic_mask = {
2934                 .tci = RTE_BE16(0x0fff),
2935                 .inner_type = RTE_BE16(0xffff),
2936         };
2937         void *headers_m;
2938         void *headers_v;
2939         uint16_t tci_m;
2940         uint16_t tci_v;
2941
2942         if (!vlan_v)
2943                 return;
2944         if (!vlan_m)
2945                 vlan_m = &nic_mask;
2946         if (inner) {
2947                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2948                                          inner_headers);
2949                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2950         } else {
2951                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2952                                          outer_headers);
2953                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2954         }
2955         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2956         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2957         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2958         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2959         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2960         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2961         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2962         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2963         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2964         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2965 }
2966
2967 /**
2968  * Add IPV4 item to matcher and to the value.
2969  *
2970  * @param[in, out] matcher
2971  *   Flow matcher.
2972  * @param[in, out] key
2973  *   Flow matcher value.
2974  * @param[in] item
2975  *   Flow pattern to translate.
2976  * @param[in] inner
2977  *   Item is inner pattern.
2978  * @param[in] group
2979  *   The group to insert the rule.
2980  */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask: full match on addresses, TOS and protocol. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 uses the full 4-bit ip_version mask (0xf), other groups
	 * a partial one (0x4) — presumably a root-table restriction on
	 * partial ip_version masks; TODO confirm against PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* Without a spec only the L3 type (IPv4) is matched. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address: mask as-is, value ANDed into mask range. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address: same treatment. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS splits into ECN (low 2 bits) and DSCP (high 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
3044
3045 /**
3046  * Add IPV6 item to matcher and to the value.
3047  *
3048  * @param[in, out] matcher
3049  *   Flow matcher.
3050  * @param[in, out] key
3051  *   Flow matcher value.
3052  * @param[in] item
3053  *   Flow pattern to translate.
3054  * @param[in] inner
3055  *   Item is inner pattern.
3056  * @param[in] group
3057  *   The group to insert the rule.
3058  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask: full match on addresses, vtc_flow and proto. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 uses the full 4-bit ip_version mask (0xf), other groups
	 * a partial one (0x6) — presumably a root-table restriction on
	 * partial ip_version masks; TODO confirm against PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* Without a spec only the L3 type (IPv6) is matched. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: copy mask, AND value into mask range. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address: same treatment. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS: traffic class sits at bits 20-27 of host-order vtc_flow. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label: the low 20 bits of vtc_flow go to the misc section. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol (next header). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
3148
3149 /**
3150  * Add TCP item to matcher and to the value.
3151  *
3152  * @param[in, out] matcher
3153  *   Flow matcher.
3154  * @param[in, out] key
3155  *   Flow matcher value.
3156  * @param[in] item
3157  *   Flow pattern to translate.
3158  * @param[in] inner
3159  *   Item is inner pattern.
3160  */
3161 static void
3162 flow_dv_translate_item_tcp(void *matcher, void *key,
3163                            const struct rte_flow_item *item,
3164                            int inner)
3165 {
3166         const struct rte_flow_item_tcp *tcp_m = item->mask;
3167         const struct rte_flow_item_tcp *tcp_v = item->spec;
3168         void *headers_m;
3169         void *headers_v;
3170
3171         if (inner) {
3172                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3173                                          inner_headers);
3174                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3175         } else {
3176                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3177                                          outer_headers);
3178                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3179         }
3180         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3181         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3182         if (!tcp_v)
3183                 return;
3184         if (!tcp_m)
3185                 tcp_m = &rte_flow_item_tcp_mask;
3186         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3187                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
3188         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3189                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3190         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3191                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3192         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3193                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3194         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3195                  tcp_m->hdr.tcp_flags);
3196         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3197                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3198 }
3199
3200 /**
3201  * Add UDP item to matcher and to the value.
3202  *
3203  * @param[in, out] matcher
3204  *   Flow matcher.
3205  * @param[in, out] key
3206  *   Flow matcher value.
3207  * @param[in] item
3208  *   Flow pattern to translate.
3209  * @param[in] inner
3210  *   Item is inner pattern.
3211  */
3212 static void
3213 flow_dv_translate_item_udp(void *matcher, void *key,
3214                            const struct rte_flow_item *item,
3215                            int inner)
3216 {
3217         const struct rte_flow_item_udp *udp_m = item->mask;
3218         const struct rte_flow_item_udp *udp_v = item->spec;
3219         void *headers_m;
3220         void *headers_v;
3221
3222         if (inner) {
3223                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3224                                          inner_headers);
3225                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3226         } else {
3227                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3228                                          outer_headers);
3229                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3230         }
3231         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3232         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3233         if (!udp_v)
3234                 return;
3235         if (!udp_m)
3236                 udp_m = &rte_flow_item_udp_mask;
3237         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3238                  rte_be_to_cpu_16(udp_m->hdr.src_port));
3239         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3240                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3241         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3242                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
3243         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3244                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3245 }
3246
3247 /**
3248  * Add GRE optional Key item to matcher and to the value.
3249  *
3250  * @param[in, out] matcher
3251  *   Flow matcher.
3252  * @param[in, out] key
3253  *   Flow matcher value.
3254  * @param[in] item
3255  *   Flow pattern to translate.
3256  * @param[in] inner
3257  *   Item is inner pattern.
3258  */
3259 static void
3260 flow_dv_translate_item_gre_key(void *matcher, void *key,
3261                                    const struct rte_flow_item *item)
3262 {
3263         const rte_be32_t *key_m = item->mask;
3264         const rte_be32_t *key_v = item->spec;
3265         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3266         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3267         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3268
3269         if (!key_v)
3270                 return;
3271         if (!key_m)
3272                 key_m = &gre_key_default_mask;
3273         /* GRE K bit must be on and should already be validated */
3274         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3275         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3276         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3277                  rte_be_to_cpu_32(*key_m) >> 8);
3278         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3279                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3280         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3281                  rte_be_to_cpu_32(*key_m) & 0xFF);
3282         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3283                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3284 }
3285
3286 /**
3287  * Add GRE item to matcher and to the value.
3288  *
3289  * @param[in, out] matcher
3290  *   Flow matcher.
3291  * @param[in, out] key
3292  *   Flow matcher value.
3293  * @param[in] item
3294  *   Flow pattern to translate.
3295  * @param[in] inner
3296  *   Item is inner pattern.
3297  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Decomposition of the GRE c_rsvd0_ver word after byte-swapping
	 * to host order; used to extract the C/K/S presence bits.
	 * NOTE(review): bitfield order is implementation-defined; this
	 * layout assumes the compilers DPDK supports — do not reorder.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* A GRE item always pins the IP protocol, even without a spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	/* Encapsulated protocol: value ANDed into the mask range. */
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* C/K/S presence bits from the checksum/key/sequence flags word. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
3361
3362 /**
3363  * Add NVGRE item to matcher and to the value.
3364  *
3365  * @param[in, out] matcher
3366  *   Flow matcher.
3367  * @param[in, out] key
3368  *   Flow matcher value.
3369  * @param[in] item
3370  *   Flow pattern to translate.
3371  * @param[in] inner
3372  *   Item is inner pattern.
3373  */
3374 static void
3375 flow_dv_translate_item_nvgre(void *matcher, void *key,
3376                              const struct rte_flow_item *item,
3377                              int inner)
3378 {
3379         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3380         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3381         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3382         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3383         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3384         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
3385         char *gre_key_m;
3386         char *gre_key_v;
3387         int size;
3388         int i;
3389
3390         flow_dv_translate_item_gre(matcher, key, item, inner);
3391         if (!nvgre_v)
3392                 return;
3393         if (!nvgre_m)
3394                 nvgre_m = &rte_flow_item_nvgre_mask;
3395         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3396         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3397         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3398         memcpy(gre_key_m, tni_flow_id_m, size);
3399         for (i = 0; i < size; ++i)
3400                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3401 }
3402
3403 /**
3404  * Add VXLAN item to matcher and to the value.
3405  *
3406  * @param[in, out] matcher
3407  *   Flow matcher.
3408  * @param[in, out] key
3409  *   Flow matcher value.
3410  * @param[in] item
3411  *   Flow pattern to translate.
3412  * @param[in] inner
3413  *   Item is inner pattern.
3414  */
3415 static void
3416 flow_dv_translate_item_vxlan(void *matcher, void *key,
3417                              const struct rte_flow_item *item,
3418                              int inner)
3419 {
3420         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3421         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3422         void *headers_m;
3423         void *headers_v;
3424         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3425         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3426         char *vni_m;
3427         char *vni_v;
3428         uint16_t dport;
3429         int size;
3430         int i;
3431
3432         if (inner) {
3433                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3434                                          inner_headers);
3435                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3436         } else {
3437                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3438                                          outer_headers);
3439                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3440         }
3441         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3442                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3443         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3444                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3445                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3446         }
3447         if (!vxlan_v)
3448                 return;
3449         if (!vxlan_m)
3450                 vxlan_m = &rte_flow_item_vxlan_mask;
3451         size = sizeof(vxlan_m->vni);
3452         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3453         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3454         memcpy(vni_m, vxlan_m->vni, size);
3455         for (i = 0; i < size; ++i)
3456                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3457 }
3458
3459 /**
3460  * Add MPLS item to matcher and to the value.
3461  *
3462  * @param[in, out] matcher
3463  *   Flow matcher.
3464  * @param[in, out] key
3465  *   Flow matcher value.
3466  * @param[in] item
3467  *   Flow pattern to translate.
3468  * @param[in] prev_layer
3469  *   The protocol layer indicated in previous item.
3470  * @param[in] inner
3471  *   Item is inner pattern.
3472  */
3473 static void
3474 flow_dv_translate_item_mpls(void *matcher, void *key,
3475                             const struct rte_flow_item *item,
3476                             uint64_t prev_layer,
3477                             int inner)
3478 {
3479         const uint32_t *in_mpls_m = item->mask;
3480         const uint32_t *in_mpls_v = item->spec;
3481         uint32_t *out_mpls_m = 0;
3482         uint32_t *out_mpls_v = 0;
3483         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3484         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3485         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3486                                      misc_parameters_2);
3487         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3488         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3489         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3490
3491         switch (prev_layer) {
3492         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3493                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3494                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3495                          MLX5_UDP_PORT_MPLS);
3496                 break;
3497         case MLX5_FLOW_LAYER_GRE:
3498                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3499                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3500                          RTE_ETHER_TYPE_MPLS);
3501                 break;
3502         default:
3503                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3504                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3505                          IPPROTO_MPLS);
3506                 break;
3507         }
3508         if (!in_mpls_v)
3509                 return;
3510         if (!in_mpls_m)
3511                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3512         switch (prev_layer) {
3513         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3514                 out_mpls_m =
3515                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3516                                                  outer_first_mpls_over_udp);
3517                 out_mpls_v =
3518                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3519                                                  outer_first_mpls_over_udp);
3520                 break;
3521         case MLX5_FLOW_LAYER_GRE:
3522                 out_mpls_m =
3523                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3524                                                  outer_first_mpls_over_gre);
3525                 out_mpls_v =
3526                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3527                                                  outer_first_mpls_over_gre);
3528                 break;
3529         default:
3530                 /* Inner MPLS not over GRE is not supported. */
3531                 if (!inner) {
3532                         out_mpls_m =
3533                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3534                                                          misc2_m,
3535                                                          outer_first_mpls);
3536                         out_mpls_v =
3537                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3538                                                          misc2_v,
3539                                                          outer_first_mpls);
3540                 }
3541                 break;
3542         }
3543         if (out_mpls_m && out_mpls_v) {
3544                 *out_mpls_m = *in_mpls_m;
3545                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3546         }
3547 }
3548
/**
 * Add META item to matcher and to the value.
 *
 * Matches on the 32-bit packet metadata (metadata register A).  The
 * register applies to the whole packet, so unlike the other item
 * translators there is no inner/outer parameter.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(void *matcher, void *key,
			    const struct rte_flow_item *item)
{
	const struct rte_flow_item_meta *meta_m;
	const struct rte_flow_item_meta *meta_v;
	void *misc2_m =
		MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
	void *misc2_v =
		MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);

	/* A NULL mask means matching on every metadata bit. */
	meta_m = (const void *)item->mask;
	if (!meta_m)
		meta_m = &rte_flow_item_meta_mask;
	meta_v = (const void *)item->spec;
	if (meta_v) {
		/* Item data is big-endian; the register is set in CPU order. */
		MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
			 rte_be_to_cpu_32(meta_m->data));
		MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
			 rte_be_to_cpu_32(meta_v->data & meta_m->data));
	}
}
3583
/**
 * Add source vport match to the specified matcher.
 *
 * Helper shared by the port-id item and e-switch flow setup: writes the
 * misc source_port field of both mask and value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] port
 *   Source vport value to match.
 * @param[in] mask
 *   Mask over the source vport value.
 */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
3606
/**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.  May be NULL, in which case the device's
 *   own port is matched with a full mask.
 *
 * @return
 *   0 on success, a negative errno value otherwise.
 */
static int
flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
			       void *key, const struct rte_flow_item *item)
{
	const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
	const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
	uint16_t mask, val, id;
	int ret;

	/* Default to a full mask and to this device's own port id. */
	mask = pid_m ? pid_m->id : 0xffff;
	id = pid_v ? pid_v->id : dev->data->port_id;
	/* Map the DPDK port id to the e-switch vport number. */
	ret = mlx5_port_to_eswitch_info(id, NULL, &val);
	if (ret)
		return ret;
	flow_dv_translate_item_source_vport(matcher, key, val, mask);
	return 0;
}
3639
3640 /**
3641  * Add ICMP6 item to matcher and to the value.
3642  *
3643  * @param[in, out] matcher
3644  *   Flow matcher.
3645  * @param[in, out] key
3646  *   Flow matcher value.
3647  * @param[in] item
3648  *   Flow pattern to translate.
3649  * @param[in] inner
3650  *   Item is inner pattern.
3651  */
3652 static void
3653 flow_dv_translate_item_icmp6(void *matcher, void *key,
3654                               const struct rte_flow_item *item,
3655                               int inner)
3656 {
3657         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
3658         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
3659         void *headers_m;
3660         void *headers_v;
3661         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3662                                      misc_parameters_3);
3663         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3664         if (inner) {
3665                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3666                                          inner_headers);
3667                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3668         } else {
3669                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3670                                          outer_headers);
3671                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3672         }
3673         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3674         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
3675         if (!icmp6_v)
3676                 return;
3677         if (!icmp6_m)
3678                 icmp6_m = &rte_flow_item_icmp6_mask;
3679         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
3680         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
3681                  icmp6_v->type & icmp6_m->type);
3682         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
3683         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
3684                  icmp6_v->code & icmp6_m->code);
3685 }
3686
3687 /**
3688  * Add ICMP item to matcher and to the value.
3689  *
3690  * @param[in, out] matcher
3691  *   Flow matcher.
3692  * @param[in, out] key
3693  *   Flow matcher value.
3694  * @param[in] item
3695  *   Flow pattern to translate.
3696  * @param[in] inner
3697  *   Item is inner pattern.
3698  */
3699 static void
3700 flow_dv_translate_item_icmp(void *matcher, void *key,
3701                             const struct rte_flow_item *item,
3702                             int inner)
3703 {
3704         const struct rte_flow_item_icmp *icmp_m = item->mask;
3705         const struct rte_flow_item_icmp *icmp_v = item->spec;
3706         void *headers_m;
3707         void *headers_v;
3708         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3709                                      misc_parameters_3);
3710         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3711         if (inner) {
3712                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3713                                          inner_headers);
3714                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3715         } else {
3716                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3717                                          outer_headers);
3718                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3719         }
3720         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3721         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
3722         if (!icmp_v)
3723                 return;
3724         if (!icmp_m)
3725                 icmp_m = &rte_flow_item_icmp_mask;
3726         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
3727                  icmp_m->hdr.icmp_type);
3728         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
3729                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
3730         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
3731                  icmp_m->hdr.icmp_code);
3732         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
3733                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
3734 }
3735
/* All-zero reference buffer used to test whether a header set is unused. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to true when the given header set of the criteria is all-zero. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \

3742 /**
3743  * Calculate flow matcher enable bitmap.
3744  *
3745  * @param match_criteria
3746  *   Pointer to flow matcher criteria.
3747  *
3748  * @return
3749  *   Bitmap of enabled fields.
3750  */
3751 static uint8_t
3752 flow_dv_matcher_enable(uint32_t *match_criteria)
3753 {
3754         uint8_t match_criteria_enable;
3755
3756         match_criteria_enable =
3757                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3758                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3759         match_criteria_enable |=
3760                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3761                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3762         match_criteria_enable |=
3763                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3764                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3765         match_criteria_enable |=
3766                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3767                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3768 #ifdef HAVE_MLX5DV_DR
3769         match_criteria_enable |=
3770                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3771                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3772 #endif
3773         return match_criteria_enable;
3774 }
3775
3776
3777 /**
3778  * Get a flow table.
3779  *
3780  * @param dev[in, out]
3781  *   Pointer to rte_eth_dev structure.
3782  * @param[in] table_id
3783  *   Table id to use.
3784  * @param[in] egress
3785  *   Direction of the table.
3786  * @param[in] transfer
3787  *   E-Switch or NIC flow.
3788  * @param[out] error
3789  *   pointer to error structure.
3790  *
3791  * @return
3792  *   Returns tables resource based on the index, NULL in case of failed.
3793  */
3794 static struct mlx5_flow_tbl_resource *
3795 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3796                          uint32_t table_id, uint8_t egress,
3797                          uint8_t transfer,
3798                          struct rte_flow_error *error)
3799 {
3800         struct mlx5_priv *priv = dev->data->dev_private;
3801         struct mlx5_ibv_shared *sh = priv->sh;
3802         struct mlx5_flow_tbl_resource *tbl;
3803
3804 #ifdef HAVE_MLX5DV_DR
3805         if (transfer) {
3806                 tbl = &sh->fdb_tbl[table_id];
3807                 if (!tbl->obj)
3808                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3809                                 (sh->fdb_domain, table_id);
3810         } else if (egress) {
3811                 tbl = &sh->tx_tbl[table_id];
3812                 if (!tbl->obj)
3813                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3814                                 (sh->tx_domain, table_id);
3815         } else {
3816                 tbl = &sh->rx_tbl[table_id];
3817                 if (!tbl->obj)
3818                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3819                                 (sh->rx_domain, table_id);
3820         }
3821         if (!tbl->obj) {
3822                 rte_flow_error_set(error, ENOMEM,
3823                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3824                                    NULL, "cannot create table");
3825                 return NULL;
3826         }
3827         rte_atomic32_inc(&tbl->refcnt);
3828         return tbl;
3829 #else
3830         (void)error;
3831         (void)tbl;
3832         if (transfer)
3833                 return &sh->fdb_tbl[table_id];
3834         else if (egress)
3835                 return &sh->tx_tbl[table_id];
3836         else
3837                 return &sh->rx_tbl[table_id];
3838 #endif
3839 }
3840
3841 /**
3842  * Release a flow table.
3843  *
3844  * @param[in] tbl
3845  *   Table resource to be released.
3846  *
3847  * @return
3848  *   Returns 0 if table was released, else return 1;
3849  */
3850 static int
3851 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3852 {
3853         if (!tbl)
3854                 return 0;
3855         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3856                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3857                 tbl->obj = NULL;
3858                 return 0;
3859         }
3860         return 1;
3861 }
3862
/**
 * Register the flow matcher.
 *
 * Looks the matcher up in the per-device cache first; on a miss a new
 * matcher object is created on the flow table derived from the group.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
		/* CRC is compared first as a cheap pre-filter for the mask. */
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    matcher->group == cache_matcher->group &&
		    matcher->transfer == cache_matcher->transfer &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	/* Get (or lazily create) the flow table this matcher belongs to. */
	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
				       matcher->egress, matcher->transfer,
				       error);
	if (!tbl) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create table");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
		/* Only DR builds took a table reference to undo here. */
#ifdef HAVE_MLX5DV_DR
		flow_dv_tbl_resource_release(tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	/*
	 * NOTE(review): the table refcnt was already incremented inside
	 * flow_dv_tbl_resource_get() on DR builds and is incremented again
	 * here — confirm the double reference is intentional.
	 */
	rte_atomic32_inc(&tbl->refcnt);
	return 0;
}
3956
3957 /**
3958  * Find existing tag resource or create and register a new one.
3959  *
3960  * @param dev[in, out]
3961  *   Pointer to rte_eth_dev structure.
3962  * @param[in, out] resource
3963  *   Pointer to tag resource.
3964  * @parm[in, out] dev_flow
3965  *   Pointer to the dev_flow.
3966  * @param[out] error
3967  *   pointer to error structure.
3968  *
3969  * @return
3970  *   0 on success otherwise -errno and errno is set.
3971  */
3972 static int
3973 flow_dv_tag_resource_register
3974                         (struct rte_eth_dev *dev,
3975                          struct mlx5_flow_dv_tag_resource *resource,
3976                          struct mlx5_flow *dev_flow,
3977                          struct rte_flow_error *error)
3978 {
3979         struct mlx5_priv *priv = dev->data->dev_private;
3980         struct mlx5_ibv_shared *sh = priv->sh;
3981         struct mlx5_flow_dv_tag_resource *cache_resource;
3982
3983         /* Lookup a matching resource from cache. */
3984         LIST_FOREACH(cache_resource, &sh->tags, next) {
3985                 if (resource->tag == cache_resource->tag) {
3986                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3987                                 (void *)cache_resource,
3988                                 rte_atomic32_read(&cache_resource->refcnt));
3989                         rte_atomic32_inc(&cache_resource->refcnt);
3990                         dev_flow->flow->tag_resource = cache_resource;
3991                         return 0;
3992                 }
3993         }
3994         /* Register new  resource. */
3995         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3996         if (!cache_resource)
3997                 return rte_flow_error_set(error, ENOMEM,
3998                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3999                                           "cannot allocate resource memory");
4000         *cache_resource = *resource;
4001         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
4002                 (resource->tag);
4003         if (!cache_resource->action) {
4004                 rte_free(cache_resource);
4005                 return rte_flow_error_set(error, ENOMEM,
4006                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4007                                           NULL, "cannot create action");
4008         }
4009         rte_atomic32_init(&cache_resource->refcnt);
4010         rte_atomic32_inc(&cache_resource->refcnt);
4011         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
4012         dev_flow->flow->tag_resource = cache_resource;
4013         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
4014                 (void *)cache_resource,
4015                 rte_atomic32_read(&cache_resource->refcnt));
4016         return 0;
4017 }
4018
4019 /**
4020  * Release the tag.
4021  *
4022  * @param dev
4023  *   Pointer to Ethernet device.
4024  * @param flow
4025  *   Pointer to mlx5_flow.
4026  *
4027  * @return
4028  *   1 while a reference on it exists, 0 when freed.
4029  */
4030 static int
4031 flow_dv_tag_release(struct rte_eth_dev *dev,
4032                     struct mlx5_flow_dv_tag_resource *tag)
4033 {
4034         assert(tag);
4035         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4036                 dev->data->port_id, (void *)tag,
4037                 rte_atomic32_read(&tag->refcnt));
4038         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4039                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4040                 LIST_REMOVE(tag, next);
4041                 DRV_LOG(DEBUG, "port %u tag %p: removed",
4042                         dev->data->port_id, (void *)tag);
4043                 rte_free(tag);
4044                 return 0;
4045         }
4046         return 1;
4047 }
4048
4049 /**
4050  * Translate port ID action to vport.
4051  *
4052  * @param[in] dev
4053  *   Pointer to rte_eth_dev structure.
4054  * @param[in] action
4055  *   Pointer to the port ID action.
4056  * @param[out] dst_port_id
4057  *   The target port ID.
4058  * @param[out] error
4059  *   Pointer to the error structure.
4060  *
4061  * @return
4062  *   0 on success, a negative errno value otherwise and rte_errno is set.
4063  */
4064 static int
4065 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4066                                  const struct rte_flow_action *action,
4067                                  uint32_t *dst_port_id,
4068                                  struct rte_flow_error *error)
4069 {
4070         uint32_t port;
4071         uint16_t port_id;
4072         int ret;
4073         const struct rte_flow_action_port_id *conf =
4074                         (const struct rte_flow_action_port_id *)action->conf;
4075
4076         port = conf->original ? dev->data->port_id : conf->id;
4077         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4078         if (ret)
4079                 return rte_flow_error_set(error, -ret,
4080                                           RTE_FLOW_ERROR_TYPE_ACTION,
4081                                           NULL,
4082                                           "No eswitch info was found for port");
4083         *dst_port_id = port_id;
4084         return 0;
4085 }
4086
4087 /**
4088  * Fill the flow with DV spec.
4089  *
4090  * @param[in] dev
4091  *   Pointer to rte_eth_dev structure.
4092  * @param[in, out] dev_flow
4093  *   Pointer to the sub flow.
4094  * @param[in] attr
4095  *   Pointer to the flow attributes.
4096  * @param[in] items
4097  *   Pointer to the list of items.
4098  * @param[in] actions
4099  *   Pointer to the list of actions.
4100  * @param[out] error
4101  *   Pointer to the error structure.
4102  *
4103  * @return
4104  *   0 on success, a negative errno value otherwise and rte_errno is set.
4105  */
4106 static int
4107 flow_dv_translate(struct rte_eth_dev *dev,
4108                   struct mlx5_flow *dev_flow,
4109                   const struct rte_flow_attr *attr,
4110                   const struct rte_flow_item items[],
4111                   const struct rte_flow_action actions[],
4112                   struct rte_flow_error *error)
4113 {
4114         struct mlx5_priv *priv = dev->data->dev_private;
4115         struct rte_flow *flow = dev_flow->flow;
4116         uint64_t item_flags = 0;
4117         uint64_t last_item = 0;
4118         uint64_t action_flags = 0;
4119         uint64_t priority = attr->priority;
4120         struct mlx5_flow_dv_matcher matcher = {
4121                 .mask = {
4122                         .size = sizeof(matcher.mask.buf),
4123                 },
4124         };
4125         int actions_n = 0;
4126         bool actions_end = false;
4127         struct mlx5_flow_dv_modify_hdr_resource res = {
4128                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4129                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4130         };
4131         union flow_dv_attr flow_attr = { .attr = 0 };
4132         struct mlx5_flow_dv_tag_resource tag_resource;
4133         uint32_t modify_action_position = UINT32_MAX;
4134         void *match_mask = matcher.mask.buf;
4135         void *match_value = dev_flow->dv.value.buf;
4136
4137         flow->group = attr->group;
4138         if (attr->transfer)
4139                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4140         if (priority == MLX5_FLOW_PRIO_RSVD)
4141                 priority = priv->config.flow_prio - 1;
4142         for (; !actions_end ; actions++) {
4143                 const struct rte_flow_action_queue *queue;
4144                 const struct rte_flow_action_rss *rss;
4145                 const struct rte_flow_action *action = actions;
4146                 const struct rte_flow_action_count *count = action->conf;
4147                 const uint8_t *rss_key;
4148                 const struct rte_flow_action_jump *jump_data;
4149                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4150                 struct mlx5_flow_tbl_resource *tbl;
4151                 uint32_t port_id = 0;
4152                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4153
4154                 switch (actions->type) {
4155                 case RTE_FLOW_ACTION_TYPE_VOID:
4156                         break;
4157                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4158                         if (flow_dv_translate_action_port_id(dev, action,
4159                                                              &port_id, error))
4160                                 return -rte_errno;
4161                         port_id_resource.port_id = port_id;
4162                         if (flow_dv_port_id_action_resource_register
4163                             (dev, &port_id_resource, dev_flow, error))
4164                                 return -rte_errno;
4165                         dev_flow->dv.actions[actions_n++] =
4166                                 dev_flow->dv.port_id_action->action;
4167                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4168                         break;
4169                 case RTE_FLOW_ACTION_TYPE_FLAG:
4170                         tag_resource.tag =
4171                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4172                         if (!flow->tag_resource)
4173                                 if (flow_dv_tag_resource_register
4174                                     (dev, &tag_resource, dev_flow, error))
4175                                         return errno;
4176                         dev_flow->dv.actions[actions_n++] =
4177                                 flow->tag_resource->action;
4178                         action_flags |= MLX5_FLOW_ACTION_FLAG;
4179                         break;
4180                 case RTE_FLOW_ACTION_TYPE_MARK:
4181                         tag_resource.tag = mlx5_flow_mark_set
4182                               (((const struct rte_flow_action_mark *)
4183                                (actions->conf))->id);
4184                         if (!flow->tag_resource)
4185                                 if (flow_dv_tag_resource_register
4186                                     (dev, &tag_resource, dev_flow, error))
4187                                         return errno;
4188                         dev_flow->dv.actions[actions_n++] =
4189                                 flow->tag_resource->action;
4190                         action_flags |= MLX5_FLOW_ACTION_MARK;
4191                         break;
4192                 case RTE_FLOW_ACTION_TYPE_DROP:
4193                         action_flags |= MLX5_FLOW_ACTION_DROP;
4194                         break;
4195                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4196                         queue = actions->conf;
4197                         flow->rss.queue_num = 1;
4198                         (*flow->queue)[0] = queue->index;
4199                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4200                         break;
4201                 case RTE_FLOW_ACTION_TYPE_RSS:
4202                         rss = actions->conf;
4203                         if (flow->queue)
4204                                 memcpy((*flow->queue), rss->queue,
4205                                        rss->queue_num * sizeof(uint16_t));
4206                         flow->rss.queue_num = rss->queue_num;
4207                         /* NULL RSS key indicates default RSS key. */
4208                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
4209                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4210                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4211                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4212                         flow->rss.level = rss->level;
4213                         action_flags |= MLX5_FLOW_ACTION_RSS;
4214                         break;
4215                 case RTE_FLOW_ACTION_TYPE_COUNT:
4216                         if (!priv->config.devx) {
4217                                 rte_errno = ENOTSUP;
4218                                 goto cnt_err;
4219                         }
4220                         flow->counter = flow_dv_counter_new(dev, count->shared,
4221                                                             count->id);
4222                         if (flow->counter == NULL)
4223                                 goto cnt_err;
4224                         dev_flow->dv.actions[actions_n++] =
4225                                 flow->counter->action;
4226                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4227                         break;
4228 cnt_err:
4229                         if (rte_errno == ENOTSUP)
4230                                 return rte_flow_error_set
4231                                               (error, ENOTSUP,
4232                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4233                                                NULL,
4234                                                "count action not supported");
4235                         else
4236                                 return rte_flow_error_set
4237                                                 (error, rte_errno,
4238                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4239                                                  action,
4240                                                  "cannot create counter"
4241                                                   " object.");
4242                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4243                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4244                         if (flow_dv_create_action_l2_encap(dev, actions,
4245                                                            dev_flow,
4246                                                            attr->transfer,
4247                                                            error))
4248                                 return -rte_errno;
4249                         dev_flow->dv.actions[actions_n++] =
4250                                 dev_flow->dv.encap_decap->verbs_action;
4251                         action_flags |= actions->type ==
4252                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4253                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
4254                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
4255                         break;
4256                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4257                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4258                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
4259                                                            attr->transfer,
4260                                                            error))
4261                                 return -rte_errno;
4262                         dev_flow->dv.actions[actions_n++] =
4263                                 dev_flow->dv.encap_decap->verbs_action;
4264                         action_flags |= actions->type ==
4265                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4266                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
4267                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
4268                         break;
4269                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4270                         /* Handle encap with preceding decap. */
4271                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4272                                 if (flow_dv_create_action_raw_encap
4273                                         (dev, actions, dev_flow, attr, error))
4274                                         return -rte_errno;
4275                                 dev_flow->dv.actions[actions_n++] =
4276                                         dev_flow->dv.encap_decap->verbs_action;
4277                         } else {
4278                                 /* Handle encap without preceding decap. */
4279                                 if (flow_dv_create_action_l2_encap
4280                                     (dev, actions, dev_flow, attr->transfer,
4281                                      error))
4282                                         return -rte_errno;
4283                                 dev_flow->dv.actions[actions_n++] =
4284                                         dev_flow->dv.encap_decap->verbs_action;
4285                         }
4286                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4287                         break;
4288                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4289                         /* Check if this decap is followed by encap. */
4290                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4291                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4292                                action++) {
4293                         }
4294                         /* Handle decap only if it isn't followed by encap. */
4295                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4296                                 if (flow_dv_create_action_l2_decap
4297                                     (dev, dev_flow, attr->transfer, error))
4298                                         return -rte_errno;
4299                                 dev_flow->dv.actions[actions_n++] =
4300                                         dev_flow->dv.encap_decap->verbs_action;
4301                         }
4302                         /* If decap is followed by encap, handle it at encap. */
4303                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4304                         break;
4305                 case RTE_FLOW_ACTION_TYPE_JUMP:
4306                         jump_data = action->conf;
4307                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4308                                                        MLX5_GROUP_FACTOR,
4309                                                        attr->egress,
4310                                                        attr->transfer, error);
4311                         if (!tbl)
4312                                 return rte_flow_error_set
4313                                                 (error, errno,
4314                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4315                                                  NULL,
4316                                                  "cannot create jump action.");
4317                         jump_tbl_resource.tbl = tbl;
4318                         if (flow_dv_jump_tbl_resource_register
4319                             (dev, &jump_tbl_resource, dev_flow, error)) {
4320                                 flow_dv_tbl_resource_release(tbl);
4321                                 return rte_flow_error_set
4322                                                 (error, errno,
4323                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4324                                                  NULL,
4325                                                  "cannot create jump action.");
4326                         }
4327                         dev_flow->dv.actions[actions_n++] =
4328                                 dev_flow->dv.jump->action;
4329                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4330                         break;
4331                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4332                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4333                         if (flow_dv_convert_action_modify_mac(&res, actions,
4334                                                               error))
4335                                 return -rte_errno;
4336                         action_flags |= actions->type ==
4337                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4338                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
4339                                         MLX5_FLOW_ACTION_SET_MAC_DST;
4340                         break;
4341                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4342                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4343                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
4344                                                                error))
4345                                 return -rte_errno;
4346                         action_flags |= actions->type ==
4347                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4348                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
4349                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
4350                         break;
4351                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4352                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4353                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
4354                                                                error))
4355                                 return -rte_errno;
4356                         action_flags |= actions->type ==
4357                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4358                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
4359                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
4360                         break;
4361                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4362                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4363                         if (flow_dv_convert_action_modify_tp(&res, actions,
4364                                                              items, &flow_attr,
4365                                                              error))
4366                                 return -rte_errno;
4367                         action_flags |= actions->type ==
4368                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4369                                         MLX5_FLOW_ACTION_SET_TP_SRC :
4370                                         MLX5_FLOW_ACTION_SET_TP_DST;
4371                         break;
4372                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4373                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4374                                                                   &flow_attr,
4375                                                                   error))
4376                                 return -rte_errno;
4377                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4378                         break;
4379                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4380                         if (flow_dv_convert_action_modify_ttl(&res, actions,
4381                                                              items, &flow_attr,
4382                                                              error))
4383                                 return -rte_errno;
4384                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4385                         break;
4386                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4387                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4388                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4389                                                                   error))
4390                                 return -rte_errno;
4391                         action_flags |= actions->type ==
4392                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4393                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
4394                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4395                         break;
4396
4397                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4398                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4399                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4400                                                                   error))
4401                                 return -rte_errno;
4402                         action_flags |= actions->type ==
4403                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4404                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
4405                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
4406                         break;
4407                 case RTE_FLOW_ACTION_TYPE_END:
4408                         actions_end = true;
4409                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4410                                 /* create modify action if needed. */
4411                                 if (flow_dv_modify_hdr_resource_register
4412                                                                 (dev, &res,
4413                                                                  dev_flow,
4414                                                                  error))
4415                                         return -rte_errno;
4416                                 dev_flow->dv.actions[modify_action_position] =
4417                                         dev_flow->dv.modify_hdr->verbs_action;
4418                         }
4419                         break;
4420                 default:
4421                         break;
4422                 }
4423                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4424                     modify_action_position == UINT32_MAX)
4425                         modify_action_position = actions_n++;
4426         }
4427         dev_flow->dv.actions_n = actions_n;
4428         flow->actions = action_flags;
4429         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4430                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4431
4432                 switch (items->type) {
4433                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4434                         flow_dv_translate_item_port_id(dev, match_mask,
4435                                                        match_value, items);
4436                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4437                         break;
4438                 case RTE_FLOW_ITEM_TYPE_ETH:
4439                         flow_dv_translate_item_eth(match_mask, match_value,
4440                                                    items, tunnel);
4441                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4442                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4443                                              MLX5_FLOW_LAYER_OUTER_L2;
4444                         break;
4445                 case RTE_FLOW_ITEM_TYPE_VLAN:
4446                         flow_dv_translate_item_vlan(match_mask, match_value,
4447                                                     items, tunnel);
4448                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4449                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4450                                               MLX5_FLOW_LAYER_INNER_VLAN) :
4451                                              (MLX5_FLOW_LAYER_OUTER_L2 |
4452                                               MLX5_FLOW_LAYER_OUTER_VLAN);
4453                         break;
4454                 case RTE_FLOW_ITEM_TYPE_IPV4:
4455                         flow_dv_translate_item_ipv4(match_mask, match_value,
4456                                                     items, tunnel, attr->group);
4457                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4458                         dev_flow->dv.hash_fields |=
4459                                 mlx5_flow_hashfields_adjust
4460                                         (dev_flow, tunnel,
4461                                          MLX5_IPV4_LAYER_TYPES,
4462                                          MLX5_IPV4_IBV_RX_HASH);
4463                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4464                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4465                         mlx5_flow_tunnel_ip_check(items, &last_item);
4466                         break;
4467                 case RTE_FLOW_ITEM_TYPE_IPV6:
4468                         flow_dv_translate_item_ipv6(match_mask, match_value,
4469                                                     items, tunnel, attr->group);
4470                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4471                         dev_flow->dv.hash_fields |=
4472                                 mlx5_flow_hashfields_adjust
4473                                         (dev_flow, tunnel,
4474                                          MLX5_IPV6_LAYER_TYPES,
4475                                          MLX5_IPV6_IBV_RX_HASH);
4476                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4477                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4478                         mlx5_flow_tunnel_ip_check(items, &last_item);
4479                         break;
4480                 case RTE_FLOW_ITEM_TYPE_TCP:
4481                         flow_dv_translate_item_tcp(match_mask, match_value,
4482                                                    items, tunnel);
4483                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4484                         dev_flow->dv.hash_fields |=
4485                                 mlx5_flow_hashfields_adjust
4486                                         (dev_flow, tunnel, ETH_RSS_TCP,
4487                                          IBV_RX_HASH_SRC_PORT_TCP |
4488                                          IBV_RX_HASH_DST_PORT_TCP);
4489                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4490                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
4491                         break;
4492                 case RTE_FLOW_ITEM_TYPE_UDP:
4493                         flow_dv_translate_item_udp(match_mask, match_value,
4494                                                    items, tunnel);
4495                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4496                         dev_flow->dv.hash_fields |=
4497                                 mlx5_flow_hashfields_adjust
4498                                         (dev_flow, tunnel, ETH_RSS_UDP,
4499                                          IBV_RX_HASH_SRC_PORT_UDP |
4500                                          IBV_RX_HASH_DST_PORT_UDP);
4501                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4502                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
4503                         break;
4504                 case RTE_FLOW_ITEM_TYPE_GRE:
4505                         flow_dv_translate_item_gre(match_mask, match_value,
4506                                                    items, tunnel);
4507                         last_item = MLX5_FLOW_LAYER_GRE;
4508                         break;
4509                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4510                         flow_dv_translate_item_gre_key(match_mask,
4511                                                        match_value, items);
4512                         item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
4513                         break;
4514                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4515                         flow_dv_translate_item_nvgre(match_mask, match_value,
4516                                                      items, tunnel);
4517                         last_item = MLX5_FLOW_LAYER_GRE;
4518                         break;
4519                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4520                         flow_dv_translate_item_vxlan(match_mask, match_value,
4521                                                      items, tunnel);
4522                         last_item = MLX5_FLOW_LAYER_VXLAN;
4523                         break;
4524                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4525                         flow_dv_translate_item_vxlan(match_mask, match_value,
4526                                                      items, tunnel);
4527                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4528                         break;
4529                 case RTE_FLOW_ITEM_TYPE_MPLS:
4530                         flow_dv_translate_item_mpls(match_mask, match_value,
4531                                                     items, last_item, tunnel);
4532                         last_item = MLX5_FLOW_LAYER_MPLS;
4533                         break;
4534                 case RTE_FLOW_ITEM_TYPE_META:
4535                         flow_dv_translate_item_meta(match_mask, match_value,
4536                                                     items);
4537                         last_item = MLX5_FLOW_ITEM_METADATA;
4538                         break;
4539                 case RTE_FLOW_ITEM_TYPE_ICMP:
4540                         flow_dv_translate_item_icmp(match_mask, match_value,
4541                                                     items, tunnel);
4542                         item_flags |= MLX5_FLOW_LAYER_ICMP;
4543                         break;
4544                 case RTE_FLOW_ITEM_TYPE_ICMP6:
4545                         flow_dv_translate_item_icmp6(match_mask, match_value,
4546                                                       items, tunnel);
4547                         item_flags |= MLX5_FLOW_LAYER_ICMP6;
4548                         break;
4549                 default:
4550                         break;
4551                 }
4552                 item_flags |= last_item;
4553         }
4554         /*
4555          * In case of ingress traffic when E-Switch mode is enabled,
4556          * we have two cases where we need to set the source port manually.
4557          * The first one, is in case of Nic steering rule, and the second is
4558          * E-Switch rule where no port_id item was found. In both cases
4559          * the source port is set according the current port in use.
4560          */
4561         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4562             (priv->representor || priv->master)) {
4563                 if (flow_dv_translate_item_port_id(dev, match_mask,
4564                                                    match_value, NULL))
4565                         return -rte_errno;
4566         }
4567         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4568                                          dev_flow->dv.value.buf));
4569         dev_flow->layers = item_flags;
4570         /* Register matcher. */
4571         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4572                                     matcher.mask.size);
4573         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4574                                                      matcher.priority);
4575         matcher.egress = attr->egress;
4576         matcher.group = attr->group;
4577         matcher.transfer = attr->transfer;
4578         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4579                 return -rte_errno;
4580         return 0;
4581 }
4582
4583 /**
4584  * Apply the flow to the NIC.
4585  *
4586  * @param[in] dev
4587  *   Pointer to the Ethernet device structure.
4588  * @param[in, out] flow
4589  *   Pointer to flow structure.
4590  * @param[out] error
4591  *   Pointer to error structure.
4592  *
4593  * @return
4594  *   0 on success, a negative errno value otherwise and rte_errno is set.
4595  */
4596 static int
4597 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4598               struct rte_flow_error *error)
4599 {
4600         struct mlx5_flow_dv *dv;
4601         struct mlx5_flow *dev_flow;
4602         struct mlx5_priv *priv = dev->data->dev_private;
4603         int n;
4604         int err;
4605
4606         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4607                 dv = &dev_flow->dv;
4608                 n = dv->actions_n;
4609                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
4610                         if (flow->transfer) {
4611                                 dv->actions[n++] = priv->sh->esw_drop_action;
4612                         } else {
4613                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
4614                                 if (!dv->hrxq) {
4615                                         rte_flow_error_set
4616                                                 (error, errno,
4617                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4618                                                  NULL,
4619                                                  "cannot get drop hash queue");
4620                                         goto error;
4621                                 }
4622                                 dv->actions[n++] = dv->hrxq->action;
4623                         }
4624                 } else if (flow->actions &
4625                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4626                         struct mlx5_hrxq *hrxq;
4627
4628                         hrxq = mlx5_hrxq_get(dev, flow->key,
4629                                              MLX5_RSS_HASH_KEY_LEN,
4630                                              dv->hash_fields,
4631                                              (*flow->queue),
4632                                              flow->rss.queue_num);
4633                         if (!hrxq)
4634                                 hrxq = mlx5_hrxq_new
4635                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4636                                          dv->hash_fields, (*flow->queue),
4637                                          flow->rss.queue_num,
4638                                          !!(dev_flow->layers &
4639                                             MLX5_FLOW_LAYER_TUNNEL));
4640                         if (!hrxq) {
4641                                 rte_flow_error_set
4642                                         (error, rte_errno,
4643                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4644                                          "cannot get hash queue");
4645                                 goto error;
4646                         }
4647                         dv->hrxq = hrxq;
4648                         dv->actions[n++] = dv->hrxq->action;
4649                 }
4650                 dv->flow =
4651                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4652                                                   (void *)&dv->value, n,
4653                                                   dv->actions);
4654                 if (!dv->flow) {
4655                         rte_flow_error_set(error, errno,
4656                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4657                                            NULL,
4658                                            "hardware refuses to create flow");
4659                         goto error;
4660                 }
4661         }
4662         return 0;
4663 error:
4664         err = rte_errno; /* Save rte_errno before cleanup. */
4665         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4666                 struct mlx5_flow_dv *dv = &dev_flow->dv;
4667                 if (dv->hrxq) {
4668                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4669                                 mlx5_hrxq_drop_release(dev);
4670                         else
4671                                 mlx5_hrxq_release(dev, dv->hrxq);
4672                         dv->hrxq = NULL;
4673                 }
4674         }
4675         rte_errno = err; /* Restore rte_errno. */
4676         return -rte_errno;
4677 }
4678
4679 /**
4680  * Release the flow matcher.
4681  *
4682  * @param dev
4683  *   Pointer to Ethernet device.
4684  * @param flow
4685  *   Pointer to mlx5_flow.
4686  *
4687  * @return
4688  *   1 while a reference on it exists, 0 when freed.
4689  */
4690 static int
4691 flow_dv_matcher_release(struct rte_eth_dev *dev,
4692                         struct mlx5_flow *flow)
4693 {
4694         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4695         struct mlx5_priv *priv = dev->data->dev_private;
4696         struct mlx5_ibv_shared *sh = priv->sh;
4697         struct mlx5_flow_tbl_resource *tbl;
4698
4699         assert(matcher->matcher_object);
4700         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4701                 dev->data->port_id, (void *)matcher,
4702                 rte_atomic32_read(&matcher->refcnt));
4703         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4704                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4705                            (matcher->matcher_object));
4706                 LIST_REMOVE(matcher, next);
4707                 if (matcher->egress)
4708                         tbl = &sh->tx_tbl[matcher->group];
4709                 else
4710                         tbl = &sh->rx_tbl[matcher->group];
4711                 flow_dv_tbl_resource_release(tbl);
4712                 rte_free(matcher);
4713                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4714                         dev->data->port_id, (void *)matcher);
4715                 return 0;
4716         }
4717         return 1;
4718 }
4719
4720 /**
4721  * Release an encap/decap resource.
4722  *
4723  * @param flow
4724  *   Pointer to mlx5_flow.
4725  *
4726  * @return
4727  *   1 while a reference on it exists, 0 when freed.
4728  */
4729 static int
4730 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4731 {
4732         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4733                                                 flow->dv.encap_decap;
4734
4735         assert(cache_resource->verbs_action);
4736         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4737                 (void *)cache_resource,
4738                 rte_atomic32_read(&cache_resource->refcnt));
4739         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4740                 claim_zero(mlx5_glue->destroy_flow_action
4741                                 (cache_resource->verbs_action));
4742                 LIST_REMOVE(cache_resource, next);
4743                 rte_free(cache_resource);
4744                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4745                         (void *)cache_resource);
4746                 return 0;
4747         }
4748         return 1;
4749 }
4750
4751 /**
4752  * Release an jump to table action resource.
4753  *
4754  * @param flow
4755  *   Pointer to mlx5_flow.
4756  *
4757  * @return
4758  *   1 while a reference on it exists, 0 when freed.
4759  */
4760 static int
4761 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4762 {
4763         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4764                                                 flow->dv.jump;
4765
4766         assert(cache_resource->action);
4767         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4768                 (void *)cache_resource,
4769                 rte_atomic32_read(&cache_resource->refcnt));
4770         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4771                 claim_zero(mlx5_glue->destroy_flow_action
4772                                 (cache_resource->action));
4773                 LIST_REMOVE(cache_resource, next);
4774                 flow_dv_tbl_resource_release(cache_resource->tbl);
4775                 rte_free(cache_resource);
4776                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4777                         (void *)cache_resource);
4778                 return 0;
4779         }
4780         return 1;
4781 }
4782
4783 /**
4784  * Release a modify-header resource.
4785  *
4786  * @param flow
4787  *   Pointer to mlx5_flow.
4788  *
4789  * @return
4790  *   1 while a reference on it exists, 0 when freed.
4791  */
4792 static int
4793 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4794 {
4795         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4796                                                 flow->dv.modify_hdr;
4797
4798         assert(cache_resource->verbs_action);
4799         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4800                 (void *)cache_resource,
4801                 rte_atomic32_read(&cache_resource->refcnt));
4802         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4803                 claim_zero(mlx5_glue->destroy_flow_action
4804                                 (cache_resource->verbs_action));
4805                 LIST_REMOVE(cache_resource, next);
4806                 rte_free(cache_resource);
4807                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4808                         (void *)cache_resource);
4809                 return 0;
4810         }
4811         return 1;
4812 }
4813
4814 /**
4815  * Release port ID action resource.
4816  *
4817  * @param flow
4818  *   Pointer to mlx5_flow.
4819  *
4820  * @return
4821  *   1 while a reference on it exists, 0 when freed.
4822  */
4823 static int
4824 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4825 {
4826         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4827                 flow->dv.port_id_action;
4828
4829         assert(cache_resource->action);
4830         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4831                 (void *)cache_resource,
4832                 rte_atomic32_read(&cache_resource->refcnt));
4833         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4834                 claim_zero(mlx5_glue->destroy_flow_action
4835                                 (cache_resource->action));
4836                 LIST_REMOVE(cache_resource, next);
4837                 rte_free(cache_resource);
4838                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4839                         (void *)cache_resource);
4840                 return 0;
4841         }
4842         return 1;
4843 }
4844
4845 /**
4846  * Remove the flow from the NIC but keeps it in memory.
4847  *
4848  * @param[in] dev
4849  *   Pointer to Ethernet device.
4850  * @param[in, out] flow
4851  *   Pointer to flow structure.
4852  */
4853 static void
4854 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4855 {
4856         struct mlx5_flow_dv *dv;
4857         struct mlx5_flow *dev_flow;
4858
4859         if (!flow)
4860                 return;
4861         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4862                 dv = &dev_flow->dv;
4863                 if (dv->flow) {
4864                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4865                         dv->flow = NULL;
4866                 }
4867                 if (dv->hrxq) {
4868                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4869                                 mlx5_hrxq_drop_release(dev);
4870                         else
4871                                 mlx5_hrxq_release(dev, dv->hrxq);
4872                         dv->hrxq = NULL;
4873                 }
4874         }
4875 }
4876
4877 /**
4878  * Remove the flow from the NIC and the memory.
4879  *
4880  * @param[in] dev
4881  *   Pointer to the Ethernet device structure.
4882  * @param[in, out] flow
4883  *   Pointer to flow structure.
4884  */
4885 static void
4886 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4887 {
4888         struct mlx5_flow *dev_flow;
4889
4890         if (!flow)
4891                 return;
4892         flow_dv_remove(dev, flow);
4893         if (flow->counter) {
4894                 flow_dv_counter_release(flow->counter);
4895                 flow->counter = NULL;
4896         }
4897         if (flow->tag_resource) {
4898                 flow_dv_tag_release(dev, flow->tag_resource);
4899                 flow->tag_resource = NULL;
4900         }
4901         while (!LIST_EMPTY(&flow->dev_flows)) {
4902                 dev_flow = LIST_FIRST(&flow->dev_flows);
4903                 LIST_REMOVE(dev_flow, next);
4904                 if (dev_flow->dv.matcher)
4905                         flow_dv_matcher_release(dev, dev_flow);
4906                 if (dev_flow->dv.encap_decap)
4907                         flow_dv_encap_decap_resource_release(dev_flow);
4908                 if (dev_flow->dv.modify_hdr)
4909                         flow_dv_modify_hdr_resource_release(dev_flow);
4910                 if (dev_flow->dv.jump)
4911                         flow_dv_jump_tbl_resource_release(dev_flow);
4912                 if (dev_flow->dv.port_id_action)
4913                         flow_dv_port_id_action_resource_release(dev_flow);
4914                 rte_free(dev_flow);
4915         }
4916 }
4917
4918 /**
4919  * Query a dv flow  rule for its statistics via devx.
4920  *
4921  * @param[in] dev
4922  *   Pointer to Ethernet device.
4923  * @param[in] flow
4924  *   Pointer to the sub flow.
4925  * @param[out] data
4926  *   data retrieved by the query.
4927  * @param[out] error
4928  *   Perform verbose error reporting if not NULL.
4929  *
4930  * @return
4931  *   0 on success, a negative errno value otherwise and rte_errno is set.
4932  */
4933 static int
4934 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4935                     void *data, struct rte_flow_error *error)
4936 {
4937         struct mlx5_priv *priv = dev->data->dev_private;
4938         struct rte_flow_query_count *qc = data;
4939         uint64_t pkts = 0;
4940         uint64_t bytes = 0;
4941         int err;
4942
4943         if (!priv->config.devx)
4944                 return rte_flow_error_set(error, ENOTSUP,
4945                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4946                                           NULL,
4947                                           "counters are not supported");
4948         if (flow->counter) {
4949                 err = mlx5_devx_cmd_flow_counter_query
4950                                                 (flow->counter->dcs,
4951                                                  qc->reset, &pkts, &bytes);
4952                 if (err)
4953                         return rte_flow_error_set
4954                                 (error, err,
4955                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4956                                  NULL,
4957                                  "cannot read counters");
4958                 qc->hits_set = 1;
4959                 qc->bytes_set = 1;
4960                 qc->hits = pkts - flow->counter->hits;
4961                 qc->bytes = bytes - flow->counter->bytes;
4962                 if (qc->reset) {
4963                         flow->counter->hits = pkts;
4964                         flow->counter->bytes = bytes;
4965                 }
4966                 return 0;
4967         }
4968         return rte_flow_error_set(error, EINVAL,
4969                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4970                                   NULL,
4971                                   "counters are not available");
4972 }
4973
4974 /**
4975  * Query a flow.
4976  *
4977  * @see rte_flow_query()
4978  * @see rte_flow_ops
4979  */
4980 static int
4981 flow_dv_query(struct rte_eth_dev *dev,
4982               struct rte_flow *flow __rte_unused,
4983               const struct rte_flow_action *actions __rte_unused,
4984               void *data __rte_unused,
4985               struct rte_flow_error *error __rte_unused)
4986 {
4987         int ret = -EINVAL;
4988
4989         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4990                 switch (actions->type) {
4991                 case RTE_FLOW_ACTION_TYPE_VOID:
4992                         break;
4993                 case RTE_FLOW_ACTION_TYPE_COUNT:
4994                         ret = flow_dv_query_count(dev, flow, data, error);
4995                         break;
4996                 default:
4997                         return rte_flow_error_set(error, ENOTSUP,
4998                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4999                                                   actions,
5000                                                   "action not supported");
5001                 }
5002         }
5003         return ret;
5004 }
5005
5006 /*
5007  * Mutex-protected thunk to flow_dv_translate().
5008  */
5009 static int
5010 flow_d_translate(struct rte_eth_dev *dev,
5011                  struct mlx5_flow *dev_flow,
5012                  const struct rte_flow_attr *attr,
5013                  const struct rte_flow_item items[],
5014                  const struct rte_flow_action actions[],
5015                  struct rte_flow_error *error)
5016 {
5017         int ret;
5018
5019         flow_d_shared_lock(dev);
5020         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
5021         flow_d_shared_unlock(dev);
5022         return ret;
5023 }
5024
/*
 * Mutex-protected thunk to flow_dv_apply(): takes the shared lock
 * around the real apply to serialize against concurrent flow updates.
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int rc;

	flow_d_shared_lock(dev);
	rc = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return rc;
}
5040
/*
 * Mutex-protected thunk to flow_dv_remove().
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Hold the shared lock for the whole removal. */
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
5051
/*
 * Mutex-protected thunk to flow_dv_destroy().
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	/* Hold the shared lock for the whole destruction. */
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
5062
/*
 * DV flow engine entry points. The translate/apply/remove/destroy
 * callbacks go through the flow_d_* lock-taking thunks above;
 * validate, prepare and query call the DV implementations directly.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
5072
5073 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */