net/mlx5: fix UDP checksum zeroing
dpdk.git: drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

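/*
 * The unnamed bit-field struct overlays the single 32-bit word "attr",
 * so all detected-layer flags can be initialized or tested at once.
 */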
union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

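/*
 * The tables below map byte ranges of each protocol header to the
 * matching mlx5 modification field IDs; offsets are bytes from the
 * start of the header (e.g. IPv4 TTL at offset 8, source address at
 * offset 12) and a zero-sized entry terminates each table.
 */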
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
{
        uint8_t next_protocol = 0xFF;

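        /*
         * Note: item->spec is dereferenced below whenever a mask is
         * present; callers are expected to provide a valid spec along
         * with any mask for IPv4/IPv6 items.
         */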
        if (item->mask != NULL) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        next_protocol =
                                ((const struct rte_flow_item_ipv4 *)
                                 (item->spec))->hdr.next_proto_id;
                        next_protocol &=
                                ((const struct rte_flow_item_ipv4 *)
                                 (item->mask))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        next_protocol =
                                ((const struct rte_flow_item_ipv6 *)
                                 (item->spec))->hdr.proto;
                        next_protocol &=
                                ((const struct rte_flow_item_ipv6 *)
                                 (item->mask))->hdr.proto;
                        break;
                default:
                        break;
                }
        }
        if (next_protocol == IPPROTO_IPIP)
                *flags |= MLX5_FLOW_LAYER_IPIP;
        if (next_protocol == IPPROTO_IPV6)
                *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
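                        /*
                         * Per the PRM convention assumed here, a length
                         * of 0 encodes a full 32-bit field; narrower
                         * fields are expressed in bits.
                         */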
                        actions[i].length = field->size ==
                                        4 ? 0 : field->size * 8;
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
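        /*
         * item and field are assumed to have been set in one of the
         * branches above; validation is expected to have rejected
         * SET_TP actions on flows that match neither UDP nor TCP.
         */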
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
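                /*
                 * The decrement is emulated with an ADD command:
                 * adding 0xFF to the 8-bit TTL (or hop limit below)
                 * wraps around and effectively subtracts one.
                 */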
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
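        /*
         * Equivalently, after the truncation to 32 bits below,
         * value * UINT32_MAX == -value (mod 2^32).
         */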
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}

/**
 * Validate port ID (vport) item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        uint16_t esw_domain_id;
        uint16_t item_port_esw_domain_id;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                           mask,
                                           "no support for partial mask on"
                                           " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
                                        NULL);
        if (ret)
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        ret = mlx5_port_to_eswitch_info(dev->data->port_id,
                                        &esw_domain_id, NULL);
        if (ret < 0)
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (item_port_esw_domain_id != esw_domain_id)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->config.devx)
                goto notsup_err;
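        /* Counters also require DevX support compiled into the PMD. */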
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (!attr->transfer && attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap"
                                          " action in a flow");
        /* encap without preceding decap is not supported for ingress */
        if (!attr->transfer && attr->ingress &&
            !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " decap action");
        if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single decap"
                                          " action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        /* decap action is valid on egress only if it is followed by encap */
        if (attr->egress) {
                for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
                       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
                       action++) {
                }
                if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                         NULL, "decap action not supported"
                                         " for egress");
        }
        return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5dv_dr_domain *domain;

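        /*
         * Group 0 is the root table: actions created for it must carry
         * MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL (1) in the flags.
         */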
        resource->flags = flow->group ? 0 : 1;
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->flags == cache_resource->flags &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (sh->ctx, cache_resource->reformat_type,
                         cache_resource->ft_type, domain, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_jump_tbl_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
                if (resource->tbl == cache_resource->tbl) {
                        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.jump = cache_resource;
                        return 0;
                }
        }
        /* Register new jump table resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_flow_tbl
                (resource->tbl->obj);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
        dev_flow->dv.jump = cache_resource;
        DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Find existing table port ID resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_port_id_action_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_port_id_action_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_port_id_action_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
                if (resource->port_id == cache_resource->port_id) {
                        DRV_LOG(DEBUG, "port id action resource %p: "
                                "refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.port_id_action = cache_resource;
                        return 0;
                }
        }
        /* Register new port id action resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_vport
                        (priv->sh->fdb_domain, resource->port_id);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
        dev_flow->dv.port_id_action = cache_resource;
        DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Get the size of specific rte_flow_item_type
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the corresponding rte_flow_item_* structure, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_flow_item_eth);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_flow_item_vlan);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_flow_item_ipv4);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_flow_item_ipv6);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_flow_item_udp);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_flow_item_tcp);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                retval = sizeof(struct rte_flow_item_vxlan);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                retval = sizeof(struct rte_flow_item_gre);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_flow_item_nvgre);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_flow_item_vxlan_gpe);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_flow_item_mpls);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}

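/* Default header field values applied when the encap pattern leaves them unset. */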
#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv4_hdr *ipv4 = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        struct rte_vxlan_hdr *vxlan = NULL;
        struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct rte_gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct rte_ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct rte_vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1395                         break;
1396                 case RTE_FLOW_ITEM_TYPE_IPV6:
1397                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1398                         if (!vlan && !eth)
1399                                 return rte_flow_error_set(error, EINVAL,
1400                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1401                                                 (void *)items->type,
1402                                                 "neither eth nor vlan"
1403                                                 " header found");
1404                         if (vlan && !vlan->eth_proto)
1405                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1406                         else if (eth && !eth->ether_type)
1407                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1408                         if (!ipv6->vtc_flow)
1409                                 ipv6->vtc_flow =
1410                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1411                         if (!ipv6->hop_limits)
1412                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1413                         break;
1414                 case RTE_FLOW_ITEM_TYPE_UDP:
1415                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1416                         if (!ipv4 && !ipv6)
1417                                 return rte_flow_error_set(error, EINVAL,
1418                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1419                                                 (void *)items->type,
1420                                                 "ip header not found");
1421                         if (ipv4 && !ipv4->next_proto_id)
1422                                 ipv4->next_proto_id = IPPROTO_UDP;
1423                         else if (ipv6 && !ipv6->proto)
1424                                 ipv6->proto = IPPROTO_UDP;
1425                         break;
1426                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1427                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1428                         if (!udp)
1429                                 return rte_flow_error_set(error, EINVAL,
1430                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1431                                                 (void *)items->type,
1432                                                 "udp header not found");
1433                         if (!udp->dst_port)
1434                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1435                         if (!vxlan->vx_flags)
1436                                 vxlan->vx_flags =
1437                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1438                         break;
1439                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1440                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1441                         if (!udp)
1442                                 return rte_flow_error_set(error, EINVAL,
1443                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1444                                                 (void *)items->type,
1445                                                 "udp header not found");
1446                         if (!vxlan_gpe->proto)
1447                                 return rte_flow_error_set(error, EINVAL,
1448                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1449                                                 (void *)items->type,
1450                                                 "next protocol not found");
1451                         if (!udp->dst_port)
1452                                 udp->dst_port =
1453                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1454                         if (!vxlan_gpe->vx_flags)
1455                                 vxlan_gpe->vx_flags =
1456                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1457                         break;
1458                 case RTE_FLOW_ITEM_TYPE_GRE:
1459                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1460                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1461                         if (!gre->proto)
1462                                 return rte_flow_error_set(error, EINVAL,
1463                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1464                                                 (void *)items->type,
1465                                                 "next protocol not found");
1466                         if (!ipv4 && !ipv6)
1467                                 return rte_flow_error_set(error, EINVAL,
1468                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1469                                                 (void *)items->type,
1470                                                 "ip header not found");
1471                         if (ipv4 && !ipv4->next_proto_id)
1472                                 ipv4->next_proto_id = IPPROTO_GRE;
1473                         else if (ipv6 && !ipv6->proto)
1474                                 ipv6->proto = IPPROTO_GRE;
1475                         break;
1476                 case RTE_FLOW_ITEM_TYPE_VOID:
1477                         break;
1478                 default:
1479                         return rte_flow_error_set(error, EINVAL,
1480                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1481                                                   (void *)items->type,
1482                                                   "unsupported item type");
1483                         break;
1484                 }
1485                 temp_size += len;
1486         }
1487         *size = temp_size;
1488         return 0;
1489 }
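/*
 * Illustrative sketch only (example_build_vxlan_encap is hypothetical):
 * a minimal ETH/IPv4/UDP/VXLAN definition run through
 * flow_dv_convert_encap_data(). All-zero spec fields get the
 * MLX5_ENCAP_* defaults above (ether_type, IHL/TTL, VXLAN UDP port,
 * VXLAN flags). Note that every item needs a non-NULL spec, since the
 * function copies from items->spec unconditionally; the caller's buf
 * must hold at least MLX5_ENCAP_MAX_LEN bytes.
 */
static int
example_build_vxlan_encap(uint8_t *buf, size_t *size,
			  struct rte_flow_error *error)
{
	static const struct rte_flow_item_eth eth;     /* Zeroed specs: */
	static const struct rte_flow_item_ipv4 ipv4;   /* defaults apply. */
	static const struct rte_flow_item_udp udp;
	static const struct rte_flow_item_vxlan vxlan;
	const struct rte_flow_item items[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return flow_dv_convert_encap_data(items, buf, size, error);
}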
1490
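/**
 * Zero the UDP checksum of the encapsulation header.
 *
 * The outer UDP checksum is not recomputed by the HW, so any stale
 * value left in an IPv6/UDP template is cleared here (a zero UDP
 * checksum means "no checksum" on the wire); the IPv4 header checksum
 * is computed by the HW, so that case returns early.
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation buffer, starting at the L2 header.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */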
1491 static int
1492 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1493 {
1494         struct rte_ether_hdr *eth = NULL;
1495         struct rte_vlan_hdr *vlan = NULL;
1496         struct rte_ipv6_hdr *ipv6 = NULL;
1497         struct rte_udp_hdr *udp = NULL;
1498         char *next_hdr;
1499         uint16_t proto;
1500
1501         eth = (struct rte_ether_hdr *)data;
1502         next_hdr = (char *)(eth + 1);
1503         proto = RTE_BE16(eth->ether_type);
1504
1505         /* VLAN skipping */
1506         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1507                 vlan = (struct rte_vlan_hdr *)next_hdr;
1508                 proto = RTE_BE16(vlan->eth_proto);
1509                 next_hdr += sizeof(struct rte_vlan_hdr);
1510         }
1511
1512         /* HW calculates the IPv4 checksum, no need to proceed. */
1513         if (proto == RTE_ETHER_TYPE_IPV4)
1514                 return 0;
1515
1516         /* Non-IPv4/IPv6 header, not supported. */
1517         if (proto != RTE_ETHER_TYPE_IPV6) {
1518                 return rte_flow_error_set(error, ENOTSUP,
1519                                           RTE_FLOW_ERROR_TYPE_ACTION,
1520                                           NULL, "Cannot offload non IPv4/IPv6");
1521         }
1522
1523         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1524
1525         /* Ignore non-UDP packets. */
1526         if (ipv6->proto != IPPROTO_UDP)
1527                 return 0;
1528
1529         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1530         udp->dgram_cksum = 0;
1531
1532         return 0;
1533 }
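/*
 * Illustrative sketch only (example_zero_v6_udp_csum is hypothetical):
 * an ETH/IPv6/UDP header walked by flow_dv_zero_encap_udp_csum(). The
 * stale checksum written below is cleared by the call, so the encap
 * template carries the "no checksum" zero value on the wire.
 */
static int
example_zero_v6_udp_csum(struct rte_flow_error *error)
{
	uint8_t buf[sizeof(struct rte_ether_hdr) +
		    sizeof(struct rte_ipv6_hdr) +
		    sizeof(struct rte_udp_hdr)] = { 0 };
	struct rte_ether_hdr *eth = (struct rte_ether_hdr *)buf;
	struct rte_ipv6_hdr *ipv6 = (struct rte_ipv6_hdr *)(eth + 1);
	struct rte_udp_hdr *udp = (struct rte_udp_hdr *)(ipv6 + 1);

	eth->ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV6);
	ipv6->proto = IPPROTO_UDP;
	udp->dgram_cksum = rte_cpu_to_be_16(0xffff); /* Stale value. */
	return flow_dv_zero_encap_udp_csum(buf, error);
	/* On success udp->dgram_cksum is now 0. */
}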
1534
1535 /**
1536  * Convert L2 encap action to DV specification.
1537  *
1538  * @param[in] dev
1539  *   Pointer to rte_eth_dev structure.
1540  * @param[in] action
1541  *   Pointer to action structure.
1542  * @param[in, out] dev_flow
1543  *   Pointer to the mlx5_flow.
1544  * @param[in] transfer
1545  *   Mark if the flow is E-Switch flow.
1546  * @param[out] error
1547  *   Pointer to the error structure.
1548  *
1549  * @return
1550  *   0 on success, a negative errno value otherwise and rte_errno is set.
1551  */
1552 static int
1553 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1554                                const struct rte_flow_action *action,
1555                                struct mlx5_flow *dev_flow,
1556                                uint8_t transfer,
1557                                struct rte_flow_error *error)
1558 {
1559         const struct rte_flow_item *encap_data;
1560         const struct rte_flow_action_raw_encap *raw_encap_data;
1561         struct mlx5_flow_dv_encap_decap_resource res = {
1562                 .reformat_type =
1563                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1564                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1565                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1566         };
1567
1568         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1569                 raw_encap_data =
1570                         (const struct rte_flow_action_raw_encap *)action->conf;
1571                 res.size = raw_encap_data->size;
1572                 memcpy(res.buf, raw_encap_data->data, res.size);
1573                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1574                         return -rte_errno;
1575         } else {
1576                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1577                         encap_data =
1578                                 ((const struct rte_flow_action_vxlan_encap *)
1579                                                 action->conf)->definition;
1580                 else
1581                         encap_data =
1582                                 ((const struct rte_flow_action_nvgre_encap *)
1583                                                 action->conf)->definition;
1584                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1585                                                &res.size, error))
1586                         return -rte_errno;
1587         }
1588         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1589                 return rte_flow_error_set(error, EINVAL,
1590                                           RTE_FLOW_ERROR_TYPE_ACTION,
1591                                           NULL, "can't create L2 encap action");
1592         return 0;
1593 }
1594
1595 /**
1596  * Convert L2 decap action to DV specification.
1597  *
1598  * @param[in] dev
1599  *   Pointer to rte_eth_dev structure.
1600  * @param[in, out] dev_flow
1601  *   Pointer to the mlx5_flow.
1602  * @param[in] transfer
1603  *   Mark if the flow is E-Switch flow.
1604  * @param[out] error
1605  *   Pointer to the error structure.
1606  *
1607  * @return
1608  *   0 on success, a negative errno value otherwise and rte_errno is set.
1609  */
1610 static int
1611 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1612                                struct mlx5_flow *dev_flow,
1613                                uint8_t transfer,
1614                                struct rte_flow_error *error)
1615 {
1616         struct mlx5_flow_dv_encap_decap_resource res = {
1617                 .size = 0,
1618                 .reformat_type =
1619                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1620                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1621                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1622         };
1623
1624         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1625                 return rte_flow_error_set(error, EINVAL,
1626                                           RTE_FLOW_ERROR_TYPE_ACTION,
1627                                           NULL, "can't create L2 decap action");
1628         return 0;
1629 }
1630
1631 /**
1632  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1633  *
1634  * @param[in] dev
1635  *   Pointer to rte_eth_dev structure.
1636  * @param[in] action
1637  *   Pointer to action structure.
1638  * @param[in, out] dev_flow
1639  *   Pointer to the mlx5_flow.
1640  * @param[in] attr
1641  *   Pointer to the flow attributes.
1642  * @param[out] error
1643  *   Pointer to the error structure.
1644  *
1645  * @return
1646  *   0 on success, a negative errno value otherwise and rte_errno is set.
1647  */
1648 static int
1649 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1650                                 const struct rte_flow_action *action,
1651                                 struct mlx5_flow *dev_flow,
1652                                 const struct rte_flow_attr *attr,
1653                                 struct rte_flow_error *error)
1654 {
1655         const struct rte_flow_action_raw_encap *encap_data;
1656         struct mlx5_flow_dv_encap_decap_resource res;
1657
1658         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1659         res.size = encap_data->size;
1660         memcpy(res.buf, encap_data->data, res.size);
1661         res.reformat_type = attr->egress ?
1662                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1663                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1664         if (attr->transfer)
1665                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1666         else
1667                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1668                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1669         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1670                 return rte_flow_error_set(error, EINVAL,
1671                                           RTE_FLOW_ERROR_TYPE_ACTION,
1672                                           NULL, "can't create encap action");
1673         return 0;
1674 }
1675
1676 /**
1677  * Validate the modify-header actions.
1678  *
1679  * @param[in] action_flags
1680  *   Holds the actions detected until now.
1681  * @param[in] action
1682  *   Pointer to the modify action.
1683  * @param[out] error
1684  *   Pointer to error structure.
1685  *
1686  * @return
1687  *   0 on success, a negative errno value otherwise and rte_errno is set.
1688  */
1689 static int
1690 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1691                                    const struct rte_flow_action *action,
1692                                    struct rte_flow_error *error)
1693 {
1694         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1695                 return rte_flow_error_set(error, EINVAL,
1696                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1697                                           NULL, "action configuration not set");
1698         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1699                 return rte_flow_error_set(error, EINVAL,
1700                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1701                                           "can't have encap action before"
1702                                           " modify action");
1703         return 0;
1704 }
1705
1706 /**
1707  * Validate the modify-header MAC address actions.
1708  *
1709  * @param[in] action_flags
1710  *   Holds the actions detected until now.
1711  * @param[in] action
1712  *   Pointer to the modify action.
1713  * @param[in] item_flags
1714  *   Holds the items detected.
1715  * @param[out] error
1716  *   Pointer to error structure.
1717  *
1718  * @return
1719  *   0 on success, a negative errno value otherwise and rte_errno is set.
1720  */
1721 static int
1722 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1723                                    const struct rte_flow_action *action,
1724                                    const uint64_t item_flags,
1725                                    struct rte_flow_error *error)
1726 {
1727         int ret = 0;
1728
1729         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1730         if (!ret) {
1731                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1732                         return rte_flow_error_set(error, EINVAL,
1733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1734                                                   NULL,
1735                                                   "no L2 item in pattern");
1736         }
1737         return ret;
1738 }
1739
1740 /**
1741  * Validate the modify-header IPv4 address actions.
1742  *
1743  * @param[in] action_flags
1744  *   Holds the actions detected until now.
1745  * @param[in] action
1746  *   Pointer to the modify action.
1747  * @param[in] item_flags
1748  *   Holds the items detected.
1749  * @param[out] error
1750  *   Pointer to error structure.
1751  *
1752  * @return
1753  *   0 on success, a negative errno value otherwise and rte_errno is set.
1754  */
1755 static int
1756 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1757                                     const struct rte_flow_action *action,
1758                                     const uint64_t item_flags,
1759                                     struct rte_flow_error *error)
1760 {
1761         int ret = 0;
1762
1763         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1764         if (!ret) {
1765                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1766                         return rte_flow_error_set(error, EINVAL,
1767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1768                                                   NULL,
1769                                                   "no ipv4 item in pattern");
1770         }
1771         return ret;
1772 }
1773
1774 /**
1775  * Validate the modify-header IPv6 address actions.
1776  *
1777  * @param[in] action_flags
1778  *   Holds the actions detected until now.
1779  * @param[in] action
1780  *   Pointer to the modify action.
1781  * @param[in] item_flags
1782  *   Holds the items detected.
1783  * @param[out] error
1784  *   Pointer to error structure.
1785  *
1786  * @return
1787  *   0 on success, a negative errno value otherwise and rte_errno is set.
1788  */
1789 static int
1790 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1791                                     const struct rte_flow_action *action,
1792                                     const uint64_t item_flags,
1793                                     struct rte_flow_error *error)
1794 {
1795         int ret = 0;
1796
1797         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1798         if (!ret) {
1799                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1800                         return rte_flow_error_set(error, EINVAL,
1801                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1802                                                   NULL,
1803                                                   "no ipv6 item in pattern");
1804         }
1805         return ret;
1806 }
1807
1808 /**
1809  * Validate the modify-header TP actions.
1810  *
1811  * @param[in] action_flags
1812  *   Holds the actions detected until now.
1813  * @param[in] action
1814  *   Pointer to the modify action.
1815  * @param[in] item_flags
1816  *   Holds the items detected.
1817  * @param[out] error
1818  *   Pointer to error structure.
1819  *
1820  * @return
1821  *   0 on success, a negative errno value otherwise and rte_errno is set.
1822  */
1823 static int
1824 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1825                                   const struct rte_flow_action *action,
1826                                   const uint64_t item_flags,
1827                                   struct rte_flow_error *error)
1828 {
1829         int ret = 0;
1830
1831         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1832         if (!ret) {
1833                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1834                         return rte_flow_error_set(error, EINVAL,
1835                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1836                                                   NULL, "no transport layer "
1837                                                   "in pattern");
1838         }
1839         return ret;
1840 }
1841
1842 /**
1843  * Validate the modify-header actions of increment/decrement
1844  * TCP Sequence-number.
1845  *
1846  * @param[in] action_flags
1847  *   Holds the actions detected until now.
1848  * @param[in] action
1849  *   Pointer to the modify action.
1850  * @param[in] item_flags
1851  *   Holds the items detected.
1852  * @param[out] error
1853  *   Pointer to error structure.
1854  *
1855  * @return
1856  *   0 on success, a negative errno value otherwise and rte_errno is set.
1857  */
1858 static int
1859 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1860                                        const struct rte_flow_action *action,
1861                                        const uint64_t item_flags,
1862                                        struct rte_flow_error *error)
1863 {
1864         int ret = 0;
1865
1866         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1867         if (!ret) {
1868                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1869                         return rte_flow_error_set(error, EINVAL,
1870                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1871                                                   NULL, "no TCP item in"
1872                                                   " pattern");
1873                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1874                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1875                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1876                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1877                         return rte_flow_error_set(error, EINVAL,
1878                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1879                                                   NULL,
1880                                                   "cannot decrease and increase"
1881                                                   " TCP sequence number"
1882                                                   " at the same time");
1883         }
1884         return ret;
1885 }
1886
1887 /**
1888  * Validate the modify-header actions of increment/decrement
1889  * TCP Acknowledgment number.
1890  *
1891  * @param[in] action_flags
1892  *   Holds the actions detected until now.
1893  * @param[in] action
1894  *   Pointer to the modify action.
1895  * @param[in] item_flags
1896  *   Holds the items detected.
1897  * @param[out] error
1898  *   Pointer to error structure.
1899  *
1900  * @return
1901  *   0 on success, a negative errno value otherwise and rte_errno is set.
1902  */
1903 static int
1904 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1905                                        const struct rte_flow_action *action,
1906                                        const uint64_t item_flags,
1907                                        struct rte_flow_error *error)
1908 {
1909         int ret = 0;
1910
1911         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1912         if (!ret) {
1913                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1914                         return rte_flow_error_set(error, EINVAL,
1915                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1916                                                   NULL, "no TCP item in"
1917                                                   " pattern");
1918                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1919                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1920                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1921                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1922                         return rte_flow_error_set(error, EINVAL,
1923                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1924                                                   NULL,
1925                                                   "cannot decrease and increase"
1926                                                   " TCP acknowledgment number"
1927                                                   " at the same time");
1928         }
1929         return ret;
1930 }
1931
1932 /**
1933  * Validate the modify-header TTL actions.
1934  *
1935  * @param[in] action_flags
1936  *   Holds the actions detected until now.
1937  * @param[in] action
1938  *   Pointer to the modify action.
1939  * @param[in] item_flags
1940  *   Holds the items detected.
1941  * @param[out] error
1942  *   Pointer to error structure.
1943  *
1944  * @return
1945  *   0 on success, a negative errno value otherwise and rte_errno is set.
1946  */
1947 static int
1948 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1949                                    const struct rte_flow_action *action,
1950                                    const uint64_t item_flags,
1951                                    struct rte_flow_error *error)
1952 {
1953         int ret = 0;
1954
1955         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1956         if (!ret) {
1957                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1958                         return rte_flow_error_set(error, EINVAL,
1959                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1960                                                   NULL,
1961                                                   "no IP protocol in pattern");
1962         }
1963         return ret;
1964 }
1965
1966 /**
1967  * Validate jump action.
1968  *
1969  * @param[in] action
1970  *   Pointer to the modify action.
1971  * @param[in] group
1972  *   The group of the current flow.
1973  * @param[out] error
1974  *   Pointer to error structure.
1975  *
1976  * @return
1977  *   0 on success, a negative errno value otherwise and rte_errno is set.
1978  */
1979 static int
1980 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1981                              uint32_t group,
1982                              struct rte_flow_error *error)
1983 {
1984         if (!action->conf)
1985                 return rte_flow_error_set(error, EINVAL,
1986                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1987                                           NULL, "action configuration not set");
1988         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1989                 return rte_flow_error_set(error, EINVAL,
1990                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1991                                           "target group must be higher than"
1992                                           " the current flow group");
1993         return 0;
1994 }
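/*
 * Illustrative sketch only (hypothetical group numbers): the jump
 * target group must be strictly greater than the group of the flow
 * holding the action, so jumping from group 1 to group 2 validates,
 * while 2 -> 2 or 2 -> 1 would fail with EINVAL.
 */
static int
example_validate_jump(struct rte_flow_error *error)
{
	static const struct rte_flow_action_jump conf = { .group = 2 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_JUMP,
		.conf = &conf,
	};

	return flow_dv_validate_action_jump(&action, 1, error);
}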
1995
1996 /**
1997  * Validate the port_id action.
1998  *
1999  * @param[in] dev
2000  *   Pointer to rte_eth_dev structure.
2001  * @param[in] action_flags
2002  *   Bit-fields that holds the actions detected until now.
2003  * @param[in] action
2004  *   Port_id RTE action structure.
2005  * @param[in] attr
2006  *   Attributes of flow that includes this action.
2007  * @param[out] error
2008  *   Pointer to error structure.
2009  *
2010  * @return
2011  *   0 on success, a negative errno value otherwise and rte_errno is set.
2012  */
2013 static int
2014 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2015                                 uint64_t action_flags,
2016                                 const struct rte_flow_action *action,
2017                                 const struct rte_flow_attr *attr,
2018                                 struct rte_flow_error *error)
2019 {
2020         const struct rte_flow_action_port_id *port_id;
2021         uint16_t port;
2022         uint16_t esw_domain_id;
2023         uint16_t act_port_domain_id;
2024         int ret;
2025
2026         if (!attr->transfer)
2027                 return rte_flow_error_set(error, ENOTSUP,
2028                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2029                                           NULL,
2030                                           "port id action is valid in transfer"
2031                                           " mode only");
2032         if (!action || !action->conf)
2033                 return rte_flow_error_set(error, ENOTSUP,
2034                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2035                                           NULL,
2036                                           "port id action parameters must be"
2037                                           " specified");
2038         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2039                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2040                 return rte_flow_error_set(error, EINVAL,
2041                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2042                                           "can have only one fate action in"
2043                                           " a flow");
2044         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2045                                         &esw_domain_id, NULL);
2046         if (ret < 0)
2047                 return rte_flow_error_set(error, -ret,
2048                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2049                                           NULL,
2050                                           "failed to obtain E-Switch info");
2051         port_id = action->conf;
2052         port = port_id->original ? dev->data->port_id : port_id->id;
2053         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2054         if (ret)
2055                 return rte_flow_error_set
2056                                 (error, -ret,
2057                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2058                                  "failed to obtain E-Switch port id for port");
2059         if (act_port_domain_id != esw_domain_id)
2060                 return rte_flow_error_set
2061                                 (error, EINVAL,
2062                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2063                                  "port does not belong to"
2064                                  " E-Switch being configured");
2065         return 0;
2066 }
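/*
 * Illustrative sketch only (hypothetical port number): the conf an
 * application passes with a port_id action. With .original = 1 the .id
 * field is ignored and the flow's own port is used instead, mirroring
 * the selection in flow_dv_validate_action_port_id() above.
 */
static const struct rte_flow_action_port_id example_port_id_conf = {
	.original = 0,
	.id = 1, /* Hypothetical target switch port. */
};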
2067
2068 /**
2069  * Find existing modify-header resource or create and register a new one.
2070  *
2071  * @param[in, out] dev
2072  *   Pointer to rte_eth_dev structure.
2073  * @param[in, out] resource
2074  *   Pointer to modify-header resource.
2075  * @param[in, out] dev_flow
2076  *   Pointer to the dev_flow.
2077  * @param[out] error
2078  *   Pointer to error structure.
2079  *
2080  * @return
2081  *   0 on success, a negative errno value otherwise and rte_errno is set.
2082  */
2083 static int
2084 flow_dv_modify_hdr_resource_register
2085                         (struct rte_eth_dev *dev,
2086                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2087                          struct mlx5_flow *dev_flow,
2088                          struct rte_flow_error *error)
2089 {
2090         struct mlx5_priv *priv = dev->data->dev_private;
2091         struct mlx5_ibv_shared *sh = priv->sh;
2092         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2093         struct mlx5dv_dr_domain *ns;
2094
2095         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2096                 ns = sh->fdb_domain;
2097         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2098                 ns = sh->tx_domain;
2099         else
2100                 ns = sh->rx_domain;
2101         resource->flags =
2102                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2103         /* Lookup a matching resource from cache. */
2104         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2105                 if (resource->ft_type == cache_resource->ft_type &&
2106                     resource->actions_num == cache_resource->actions_num &&
2107                     resource->flags == cache_resource->flags &&
2108                     !memcmp((const void *)resource->actions,
2109                             (const void *)cache_resource->actions,
2110                             (resource->actions_num *
2111                                             sizeof(resource->actions[0])))) {
2112                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2113                                 (void *)cache_resource,
2114                                 rte_atomic32_read(&cache_resource->refcnt));
2115                         rte_atomic32_inc(&cache_resource->refcnt);
2116                         dev_flow->dv.modify_hdr = cache_resource;
2117                         return 0;
2118                 }
2119         }
2120         /* Register new modify-header resource. */
2121         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2122         if (!cache_resource)
2123                 return rte_flow_error_set(error, ENOMEM,
2124                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2125                                           "cannot allocate resource memory");
2126         *cache_resource = *resource;
2127         cache_resource->verbs_action =
2128                 mlx5_glue->dv_create_flow_action_modify_header
2129                                         (sh->ctx, cache_resource->ft_type,
2130                                          ns, cache_resource->flags,
2131                                          cache_resource->actions_num *
2132                                          sizeof(cache_resource->actions[0]),
2133                                          (uint64_t *)cache_resource->actions);
2134         if (!cache_resource->verbs_action) {
2135                 rte_free(cache_resource);
2136                 return rte_flow_error_set(error, ENOMEM,
2137                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2138                                           NULL, "cannot create action");
2139         }
2140         rte_atomic32_init(&cache_resource->refcnt);
2141         rte_atomic32_inc(&cache_resource->refcnt);
2142         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2143         dev_flow->dv.modify_hdr = cache_resource;
2144         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2145                 (void *)cache_resource,
2146                 rte_atomic32_read(&cache_resource->refcnt));
2147         return 0;
2148 }
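/*
 * The cache lookup above keys on the tuple (ft_type, actions_num,
 * flags) plus a byte-wise compare of the action array, so two flows
 * requesting an identical modify-header command sequence share one
 * verbs action and only bump its reference count.
 */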
2149
2150 #define MLX5_CNT_CONTAINER_RESIZE 64
2151
2152 /**
2153  * Get or create a flow counter.
2154  *
2155  * @param[in] dev
2156  *   Pointer to the Ethernet device structure.
2157  * @param[in] shared
2158  *   Indicate if this counter is shared with other flows.
2159  * @param[in] id
2160  *   Counter identifier.
2161  *
2162  * @return
2163  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2164  */
2165 static struct mlx5_flow_counter *
2166 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2167                                uint32_t id)
2168 {
2169         struct mlx5_priv *priv = dev->data->dev_private;
2170         struct mlx5_flow_counter *cnt = NULL;
2171         struct mlx5_devx_obj *dcs = NULL;
2172
2173         if (!priv->config.devx) {
2174                 rte_errno = ENOTSUP;
2175                 return NULL;
2176         }
2177         if (shared) {
2178                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2179                         if (cnt->shared && cnt->id == id) {
2180                                 cnt->ref_cnt++;
2181                                 return cnt;
2182                         }
2183                 }
2184         }
2185         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2186         if (!dcs)
2187                 return NULL;
2188         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2189         if (!cnt) {
2190                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2191                 rte_errno = ENOMEM;
2192                 return NULL;
2193         }
2194         struct mlx5_flow_counter tmpl = {
2195                 .shared = shared,
2196                 .ref_cnt = 1,
2197                 .id = id,
2198                 .dcs = dcs,
2199         };
2200         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2201         if (!tmpl.action) {
2202                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2203                 rte_errno = errno;
2204                 rte_free(cnt);
2205                 return NULL;
2206         }
2207         *cnt = tmpl;
2208         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2209         return cnt;
2210 }
2211
2212 /**
2213  * Release a flow counter.
2214  *
2215  * @param[in] dev
2216  *   Pointer to the Ethernet device structure.
2217  * @param[in] counter
2218  *   Pointer to the counter handler.
2219  */
2220 static void
2221 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2222                                  struct mlx5_flow_counter *counter)
2223 {
2224         struct mlx5_priv *priv = dev->data->dev_private;
2225
2226         if (!counter)
2227                 return;
2228         if (--counter->ref_cnt == 0) {
2229                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2230                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2231                 rte_free(counter);
2232         }
2233 }
2234
2235 /**
2236  * Query a devx flow counter.
2237  *
2238  * @param[in] dev
2239  *   Pointer to the Ethernet device structure.
2240  * @param[in] cnt
2241  *   Pointer to the flow counter.
2242  * @param[out] pkts
2243  *   The statistics value of packets.
2244  * @param[out] bytes
2245  *   The statistics value of bytes.
2246  *
2247  * @return
2248  *   0 on success, otherwise a negative errno value and rte_errno is set.
2249  */
2250 static inline int
2251 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2252                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2253                      uint64_t *bytes)
2254 {
2255         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2256                                                 0, NULL, NULL, 0);
2257 }
2258
2259 /**
2260  * Get a pool by a counter.
2261  *
2262  * @param[in] cnt
2263  *   Pointer to the counter.
2264  *
2265  * @return
2266  *   The counter pool.
2267  */
2268 static struct mlx5_flow_counter_pool *
2269 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2270 {
2271         if (!cnt->batch) {
2272                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2273                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2274         }
2275         return cnt->pool;
2276 }
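/*
 * A minimal sketch (simplified, hypothetical types) of the layout
 * assumption behind the "(pool *) - 1" cast above: the counter array
 * is allocated immediately after the pool header (see
 * flow_dv_pool_create() below), so stepping one pool struct back from
 * the first counter lands on the pool itself.
 */
struct example_pool_hdr {
	uint64_t pool_fields;           /* Stands in for the pool header. */
	struct { uint64_t v; } items[]; /* Like counters_raw[]. */
};

static struct example_pool_hdr *
example_pool_of_first_item(void *first_item)
{
	return (struct example_pool_hdr *)first_item - 1;
}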
2277
2278 /**
2279  * Get a pool by devx counter ID.
2280  *
2281  * @param[in] cont
2282  *   Pointer to the counter container.
2283  * @param[in] id
2284  *   The counter devx ID.
2285  *
2286  * @return
2287  *   The counter pool pointer if it exists, NULL otherwise.
2288  */
2289 static struct mlx5_flow_counter_pool *
2290 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2291 {
2292         struct mlx5_flow_counter_pool *pool;
2293
2294         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2295                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2296                                 MLX5_COUNTERS_PER_POOL;
2297
2298                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2299                         return pool;
2300         }
2301         return NULL;
2302 }
2303
2304 /**
2305  * Allocate new memory for the counter values, wrapped by all the needed
2306  * management structures.
2307  *
2308  * @param[in] dev
2309  *   Pointer to the Ethernet device structure.
2310  * @param[in] raws_n
2311  *   Number of raw memory areas, each one for MLX5_COUNTERS_PER_POOL counters.
2312  *
2313  * @return
2314  *   The new memory management pointer on success, otherwise NULL and rte_errno
2315  *   is set.
2316  */
2317 static struct mlx5_counter_stats_mem_mng *
2318 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2319 {
2320         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2321                                         (dev->data->dev_private))->sh;
2322         struct mlx5_devx_mkey_attr mkey_attr;
2323         struct mlx5_counter_stats_mem_mng *mem_mng;
2324         volatile struct flow_counter_stats *raw_data;
2325         int size = (sizeof(struct flow_counter_stats) *
2326                         MLX5_COUNTERS_PER_POOL +
2327                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2328                         sizeof(struct mlx5_counter_stats_mem_mng);
2329         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2330         int i;
2331
2332         if (!mem) {
2333                 rte_errno = ENOMEM;
2334                 return NULL;
2335         }
2336         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2337         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2338         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2339                                                  IBV_ACCESS_LOCAL_WRITE);
2340         if (!mem_mng->umem) {
2341                 rte_errno = errno;
2342                 rte_free(mem);
2343                 return NULL;
2344         }
2345         mkey_attr.addr = (uintptr_t)mem;
2346         mkey_attr.size = size;
2347         mkey_attr.umem_id = mem_mng->umem->umem_id;
2348         mkey_attr.pd = sh->pdn;
2349         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2350         if (!mem_mng->dm) {
2351                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2352                 rte_errno = errno;
2353                 rte_free(mem);
2354                 return NULL;
2355         }
2356         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2357         raw_data = (volatile struct flow_counter_stats *)mem;
2358         for (i = 0; i < raws_n; ++i) {
2359                 mem_mng->raws[i].mem_mng = mem_mng;
2360                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2361         }
2362         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2363         return mem_mng;
2364 }
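/*
 * Layout of the single allocation made above (a sketch; proportions
 * not to scale):
 *
 *   mem                             mem + size
 *   |                               |
 *   v                               v
 *   +-------------------------------+--------------+---------+
 *   | counter stats raw data        | raws[raws_n] | mem_mng |
 *   | (raws_n * MLX5_COUNTERS_PER_  | descriptors  | trailer |
 *   |  POOL entries)                |              |         |
 *   +-------------------------------+--------------+---------+
 *
 * Each raws[i].data points into the raw data region at an offset of
 * i * MLX5_COUNTERS_PER_POOL entries, and only that region is
 * registered as a umem/mkey for the device to write counter values
 * into.
 */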
2365
2366 /**
2367  * Resize a counter container.
2368  *
2369  * @param[in] dev
2370  *   Pointer to the Ethernet device structure.
2371  * @param[in] batch
2372  *   Whether the pool is for counter that was allocated by batch command.
2373  *
2374  * @return
2375  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2376  */
2377 static struct mlx5_pools_container *
2378 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2379 {
2380         struct mlx5_priv *priv = dev->data->dev_private;
2381         struct mlx5_pools_container *cont =
2382                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2383         struct mlx5_pools_container *new_cont =
2384                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2385         struct mlx5_counter_stats_mem_mng *mem_mng;
2386         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2387         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2388         int i;
2389
2390         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2391                 /* The last resize still hasn't been detected by the host thread. */
2392                 rte_errno = EAGAIN;
2393                 return NULL;
2394         }
2395         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2396         if (!new_cont->pools) {
2397                 rte_errno = ENOMEM;
2398                 return NULL;
2399         }
2400         if (cont->n)
2401                 memcpy(new_cont->pools, cont->pools, cont->n *
2402                        sizeof(struct mlx5_flow_counter_pool *));
2403         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2404                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2405         if (!mem_mng) {
2406                 rte_free(new_cont->pools);
2407                 return NULL;
2408         }
2409         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2410                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2411                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2412                                  i, next);
2413         new_cont->n = resize;
2414         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2415         TAILQ_INIT(&new_cont->pool_list);
2416         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2417         new_cont->init_mem_mng = mem_mng;
2418         rte_cio_wmb();
2419         /* Flip the master container. */
2420         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2421         return new_cont;
2422 }
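/*
 * A minimal sketch (hypothetical names) of the publish idiom used
 * above: two pre-allocated slots plus one index bit. The writer fills
 * the unused slot, orders its stores with a write barrier, then flips
 * the bit; a reader dereferencing slot[bit] sees either the old or the
 * fully initialized new container, never a half-written one.
 */
struct example_flip {
	void *slot[2];
	uint8_t bit;
};

static void
example_publish(struct example_flip *f, void *fresh)
{
	f->slot[f->bit ^ 1] = fresh; /* Fill the currently unused slot. */
	rte_cio_wmb();               /* Order the fill before the flip. */
	f->bit ^= (uint8_t)1;        /* Readers now pick up "fresh". */
}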
2423
2424 /**
2425  * Query a devx flow counter.
2426  *
2427  * @param[in] dev
2428  *   Pointer to the Ethernet device structure.
2429  * @param[in] cnt
2430  *   Pointer to the flow counter.
2431  * @param[out] pkts
2432  *   The statistics value of packets.
2433  * @param[out] bytes
2434  *   The statistics value of bytes.
2435  *
2436  * @return
2437  *   0 on success, otherwise a negative errno value and rte_errno is set.
2438  */
2439 static inline int
2440 _flow_dv_query_count(struct rte_eth_dev *dev,
2441                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2442                      uint64_t *bytes)
2443 {
2444         struct mlx5_priv *priv = dev->data->dev_private;
2445         struct mlx5_flow_counter_pool *pool =
2446                         flow_dv_counter_pool_get(cnt);
2447         int offset = cnt - &pool->counters_raw[0];
2448
2449         if (priv->counter_fallback)
2450                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2451
2452         rte_spinlock_lock(&pool->sl);
2453         /*
2454          * A single-counter allocation may return an ID smaller than those
2455          * already covered by the raw data the host thread is reading.
2456          * In this case the new counter values must be reported as 0.
2457          */
2458         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2459                 *pkts = 0;
2460                 *bytes = 0;
2461         } else {
2462                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2463                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2464         }
2465         rte_spinlock_unlock(&pool->sl);
2466         return 0;
2467 }
2468
2469 /**
2470  * Create and initialize a new counter pool.
2471  *
2472  * @param[in] dev
2473  *   Pointer to the Ethernet device structure.
2474  * @param[out] dcs
2475  *   The devX counter handle.
2476  * @param[in] batch
2477  *   Whether the pool is for counter that was allocated by batch command.
2478  *
2479  * @return
2480  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
2481  */
2482 static struct mlx5_flow_counter_pool *
2483 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2484                     uint32_t batch)
2485 {
2486         struct mlx5_priv *priv = dev->data->dev_private;
2487         struct mlx5_flow_counter_pool *pool;
2488         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2489                                                                0);
2490         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
2491         uint32_t size;
2492
2493         if (cont->n == n_valid) {
2494                 cont = flow_dv_container_resize(dev, batch);
2495                 if (!cont)
2496                         return NULL;
2497         }
2498         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
2499                         sizeof(struct mlx5_flow_counter);
2500         pool = rte_calloc(__func__, 1, size, 0);
2501         if (!pool) {
2502                 rte_errno = ENOMEM;
2503                 return NULL;
2504         }
2505         pool->min_dcs = dcs;
2506         pool->raw = cont->init_mem_mng->raws + n_valid %
2507                                                      MLX5_CNT_CONTAINER_RESIZE;
2508         pool->raw_hw = NULL;
2509         rte_spinlock_init(&pool->sl);
2510         /*
2511          * The generation of the newly allocated counters in this pool is 0;
2512          * setting the pool generation to 2 makes all of them valid for allocation.
2513          */
2514         rte_atomic64_set(&pool->query_gen, 0x2);
2515         TAILQ_INIT(&pool->counters);
2516         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2517         cont->pools[n_valid] = pool;
2518         /* Pool initialization must be updated before host thread access. */
2519         rte_cio_wmb();
2520         rte_atomic16_add(&cont->n_valid, 1);
2521         return pool;
2522 }
2523
2524 /**
2525  * Prepare a new counter and/or a new counter pool.
2526  *
2527  * @param[in] dev
2528  *   Pointer to the Ethernet device structure.
2529  * @param[out] cnt_free
2530  *   Where to put the pointer of a new counter.
2531  * @param[in] batch
2532  *   Whether the pool is for counter that was allocated by batch command.
2533  *
2534  * @return
2535  *   The free counter pool pointer and @p cnt_free is set on success,
2536  *   NULL otherwise and rte_errno is set.
2537  */
2538 static struct mlx5_flow_counter_pool *
2539 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
2540                              struct mlx5_flow_counter **cnt_free,
2541                              uint32_t batch)
2542 {
2543         struct mlx5_priv *priv = dev->data->dev_private;
2544         struct mlx5_flow_counter_pool *pool;
2545         struct mlx5_devx_obj *dcs = NULL;
2546         struct mlx5_flow_counter *cnt;
2547         uint32_t i;
2548
2549         if (!batch) {
2550                 /* bulk_bitmap must be 0 for single counter allocation. */
2551                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2552                 if (!dcs)
2553                         return NULL;
2554                 pool = flow_dv_find_pool_by_id
2555                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
2556                 if (!pool) {
2557                         pool = flow_dv_pool_create(dev, dcs, batch);
2558                         if (!pool) {
2559                                 mlx5_devx_cmd_destroy(dcs);
2560                                 return NULL;
2561                         }
2562                 } else if (dcs->id < pool->min_dcs->id) {
2563                         rte_atomic64_set(&pool->a64_dcs,
2564                                          (int64_t)(uintptr_t)dcs);
2565                 }
2566                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
2567                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2568                 cnt->dcs = dcs;
2569                 *cnt_free = cnt;
2570                 return pool;
2571         }
2572         /* bulk_bitmap is in units of 128 counters. */
2573         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
2574                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
2575         if (!dcs) {
2576                 rte_errno = ENODATA;
2577                 return NULL;
2578         }
2579         pool = flow_dv_pool_create(dev, dcs, batch);
2580         if (!pool) {
2581                 mlx5_devx_cmd_destroy(dcs);
2582                 return NULL;
2583         }
2584         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2585                 cnt = &pool->counters_raw[i];
2586                 cnt->pool = pool;
2587                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2588         }
2589         *cnt_free = &pool->counters_raw[0];
2590         return pool;
2591 }
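/*
 * Note on the bulk path above: bulk_bitmap is expressed in units of
 * 128 counters, so the 0x4 bit requests a batch of 4 * 128 = 512
 * devx counters in one command; assuming MLX5_COUNTERS_PER_POOL
 * matches that batch size in this tree, a single command fills the
 * whole pool created right after it.
 */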
2592
2593 /**
2594  * Search for an existing shared counter.
2595  *
2596  * @param[in] cont
2597  *   Pointer to the relevant counter pool container.
2598  * @param[in] id
2599  *   The shared counter ID to search.
2600  *
2601  * @return
2602  *   NULL if not found, otherwise pointer to the shared counter.
2603  */
2604 static struct mlx5_flow_counter *
2605 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
2606                               uint32_t id)
2607 {
2608         struct mlx5_flow_counter *cnt;
2609         struct mlx5_flow_counter_pool *pool;
2610         int i;
2611
2612         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2613                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2614                         cnt = &pool->counters_raw[i];
2615                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
2616                                 return cnt;
2617                 }
2618         }
2619         return NULL;
2620 }
2621
2622 /**
2623  * Allocate a flow counter.
2624  *
2625  * @param[in] dev
2626  *   Pointer to the Ethernet device structure.
2627  * @param[in] shared
2628  *   Indicate if this counter is shared with other flows.
2629  * @param[in] id
2630  *   Counter identifier.
2631  * @param[in] group
2632  *   Counter flow group.
2633  *
2634  * @return
2635  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2636  */
2637 static struct mlx5_flow_counter *
2638 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
2639                       uint16_t group)
2640 {
2641         struct mlx5_priv *priv = dev->data->dev_private;
2642         struct mlx5_flow_counter_pool *pool = NULL;
2643         struct mlx5_flow_counter *cnt_free = NULL;
2644         /*
2645          * Currently group 0 flow counter cannot be assigned to a flow if it is
2646          * Currently a group 0 flow counter cannot be assigned to a flow if
2647          * it is not the first one in a batch allocation, so it is better to
2648          * allocate counters one by one for such flows in a separate
2649          * container. A counter can be shared between different groups, so
2650          * shared counters are always taken from the single-counter container.
2651          */
2652         uint32_t batch = (group && !shared) ? 1 : 0;
2653         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2654                                                                0);
2655
2656         if (priv->counter_fallback)
2657                 return flow_dv_counter_alloc_fallback(dev, shared, id);
2658         if (!priv->config.devx) {
2659                 rte_errno = ENOTSUP;
2660                 return NULL;
2661         }
2662         if (shared) {
2663                 cnt_free = flow_dv_counter_shared_search(cont, id);
2664                 if (cnt_free) {
2665                         if (cnt_free->ref_cnt + 1 == 0) {
2666                                 rte_errno = E2BIG;
2667                                 return NULL;
2668                         }
2669                         cnt_free->ref_cnt++;
2670                         return cnt_free;
2671                 }
2672         }
2673         /* Pools which have free counters are at the start of the list. */
2674         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2675                 /*
2676                  * The free counter reset values must be updated between a
2677                  * counter release and the next counter allocation, so at
2678                  * least one query must happen in that window. Ensure it by
2679                  * saving the query generation at release time.
2680                  * The free list is sorted by generation, so if the first
2681                  * counter is not updated, all the others are not updated
2682                  * too.
2683                  */
2684                 cnt_free = TAILQ_FIRST(&pool->counters);
2685                 if (cnt_free && cnt_free->query_gen + 1 <
2686                     rte_atomic64_read(&pool->query_gen))
2687                         break;
2688                 cnt_free = NULL;
2689         }
2690         if (!cnt_free) {
2691                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
2692                 if (!pool)
2693                         return NULL;
2694         }
2695         cnt_free->batch = batch;
2696         /* Create a DV counter action only on first use. */
2697         if (!cnt_free->action) {
2698                 uint16_t offset;
2699                 struct mlx5_devx_obj *dcs;
2700
2701                 if (batch) {
2702                         offset = cnt_free - &pool->counters_raw[0];
2703                         dcs = pool->min_dcs;
2704                 } else {
2705                         offset = 0;
2706                         dcs = cnt_free->dcs;
2707                 }
2708                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
2709                                         (dcs->obj, offset);
2710                 if (!cnt_free->action) {
2711                         rte_errno = errno;
2712                         return NULL;
2713                 }
2714         }
2715         /* Update the counter reset values. */
2716         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
2717                                  &cnt_free->bytes))
2718                 return NULL;
2719         cnt_free->shared = shared;
2720         cnt_free->ref_cnt = 1;
2721         cnt_free->id = id;
2722         if (!priv->sh->cmng.query_thread_on)
2723                 /* Start the asynchronous batch query by the host thread. */
2724                 mlx5_set_query_alarm(priv->sh);
2725         TAILQ_REMOVE(&pool->counters, cnt_free, next);
2726         if (TAILQ_EMPTY(&pool->counters)) {
2727                 /* Move the pool to the end of the container pool list. */
2728                 TAILQ_REMOVE(&cont->pool_list, pool, next);
2729                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2730         }
2731         return cnt_free;
2732 }
2733
2734 /**
2735  * Release a flow counter.
2736  *
2737  * @param[in] dev
2738  *   Pointer to the Ethernet device structure.
2739  * @param[in] counter
2740  *   Pointer to the counter handler.
2741  */
2742 static void
2743 flow_dv_counter_release(struct rte_eth_dev *dev,
2744                         struct mlx5_flow_counter *counter)
2745 {
2746         struct mlx5_priv *priv = dev->data->dev_private;
2747
2748         if (!counter)
2749                 return;
2750         if (priv->counter_fallback) {
2751                 flow_dv_counter_release_fallback(dev, counter);
2752                 return;
2753         }
2754         if (--counter->ref_cnt == 0) {
2755                 struct mlx5_flow_counter_pool *pool =
2756                                 flow_dv_counter_pool_get(counter);
2757
2758                 /* Put the counter at the end - the most recently released. */
2759                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
2760                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
2761         }
2762 }
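
/*
 * Illustrative sketch, for exposition only (not part of the driver):
 * allocating the same shared counter twice returns the same object with
 * ref_cnt == 2, so both users must release it before it goes back to
 * the free list. The counter ID (42) and group (1) are arbitrary
 * example values.
 */
static __rte_unused void
flow_dv_shared_counter_sketch(struct rte_eth_dev *dev)
{
        struct mlx5_flow_counter *c1;
        struct mlx5_flow_counter *c2;

        c1 = flow_dv_counter_alloc(dev, 1, 42, 1);
        c2 = flow_dv_counter_alloc(dev, 1, 42, 1);
        if (!c1 || !c2)
                return;
        /* Shared path: c1 == c2 and c1->ref_cnt == 2. */
        flow_dv_counter_release(dev, c2);
        flow_dv_counter_release(dev, c1);
}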
2763
2764 /**
2765  * Verify the @p attributes will be correctly understood by the NIC and store
2766  * them in the @p flow if everything is correct.
2767  *
2768  * @param[in] dev
2769  *   Pointer to dev struct.
2770  * @param[in] attributes
2771  *   Pointer to flow attributes
2772  * @param[out] error
2773  *   Pointer to error structure.
2774  *
2775  * @return
2776  *   0 on success, a negative errno value otherwise and rte_errno is set.
2777  */
2778 static int
2779 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2780                             const struct rte_flow_attr *attributes,
2781                             struct rte_flow_error *error)
2782 {
2783         struct mlx5_priv *priv = dev->data->dev_private;
2784         uint32_t priority_max = priv->config.flow_prio - 1;
2785
2786 #ifndef HAVE_MLX5DV_DR
2787         if (attributes->group)
2788                 return rte_flow_error_set(error, ENOTSUP,
2789                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2790                                           NULL,
2791                                           "groups are not supported");
2792 #endif
2793         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2794             attributes->priority >= priority_max)
2795                 return rte_flow_error_set(error, ENOTSUP,
2796                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2797                                           NULL,
2798                                           "priority out of range");
2799         if (attributes->transfer) {
2800                 if (!priv->config.dv_esw_en)
2801                         return rte_flow_error_set
2802                                 (error, ENOTSUP,
2803                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2804                                  "E-Switch dr is not supported");
2805                 if (!(priv->representor || priv->master))
2806                         return rte_flow_error_set
2807                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2808                                  NULL, "E-Switch configuration can only be"
2809                                  " done by a master or a representor device");
2810                 if (attributes->egress)
2811                         return rte_flow_error_set
2812                                 (error, ENOTSUP,
2813                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2814                                  "egress is not supported");
2815                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2816                         return rte_flow_error_set
2817                                 (error, EINVAL,
2818                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2819                                  NULL, "group must be smaller than "
2820                                  RTE_STR(MLX5_MAX_TABLES_FDB));
2821         }
2822         if (!(attributes->egress ^ attributes->ingress))
2823                 return rte_flow_error_set(error, ENOTSUP,
2824                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2825                                           "must specify exactly one of "
2826                                           "ingress or egress");
2827         return 0;
2828 }
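
/*
 * Illustrative sketch, for exposition only (not part of the driver):
 * a minimal attribute set that passes the checks above - exactly one
 * direction (ingress), default group and priority, no transfer.
 */
static __rte_unused int
flow_dv_validate_attr_sketch(struct rte_eth_dev *dev,
                             struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = {
                .group = 0,
                .priority = 0,
                .ingress = 1,
        };

        return flow_dv_validate_attributes(dev, &attr, error);
}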
2829
2830 /**
2831  * Internal validation function. For validating both actions and items.
2832  *
2833  * @param[in] dev
2834  *   Pointer to the rte_eth_dev structure.
2835  * @param[in] attr
2836  *   Pointer to the flow attributes.
2837  * @param[in] items
2838  *   Pointer to the list of items.
2839  * @param[in] actions
2840  *   Pointer to the list of actions.
2841  * @param[out] error
2842  *   Pointer to the error structure.
2843  *
2844  * @return
2845  *   0 on success, a negative errno value otherwise and rte_errno is set.
2846  */
2847 static int
2848 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2849                  const struct rte_flow_item items[],
2850                  const struct rte_flow_action actions[],
2851                  struct rte_flow_error *error)
2852 {
2853         int ret;
2854         uint64_t action_flags = 0;
2855         uint64_t item_flags = 0;
2856         uint64_t last_item = 0;
2857         uint8_t next_protocol = 0xff;
2858         int actions_n = 0;
2859         const struct rte_flow_item *gre_item = NULL;
2860         struct rte_flow_item_tcp nic_tcp_mask = {
2861                 .hdr = {
2862                         .tcp_flags = 0xFF,
2863                         .src_port = RTE_BE16(UINT16_MAX),
2864                         .dst_port = RTE_BE16(UINT16_MAX),
2865                 }
2866         };
2867
2868         if (items == NULL)
2869                 return -1;
2870         ret = flow_dv_validate_attributes(dev, attr, error);
2871         if (ret < 0)
2872                 return ret;
2873         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2874                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2875                 switch (items->type) {
2876                 case RTE_FLOW_ITEM_TYPE_VOID:
2877                         break;
2878                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2879                         ret = flow_dv_validate_item_port_id
2880                                         (dev, items, attr, item_flags, error);
2881                         if (ret < 0)
2882                                 return ret;
2883                         last_item = MLX5_FLOW_ITEM_PORT_ID;
2884                         break;
2885                 case RTE_FLOW_ITEM_TYPE_ETH:
2886                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2887                                                           error);
2888                         if (ret < 0)
2889                                 return ret;
2890                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2891                                              MLX5_FLOW_LAYER_OUTER_L2;
2892                         break;
2893                 case RTE_FLOW_ITEM_TYPE_VLAN:
2894                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2895                                                            dev, error);
2896                         if (ret < 0)
2897                                 return ret;
2898                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2899                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2900                         break;
2901                 case RTE_FLOW_ITEM_TYPE_IPV4:
2902                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2903                                                            NULL, error);
2904                         if (ret < 0)
2905                                 return ret;
2906                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2907                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2908                         if (items->mask != NULL &&
2909                             ((const struct rte_flow_item_ipv4 *)
2910                              items->mask)->hdr.next_proto_id) {
2911                                 next_protocol =
2912                                         ((const struct rte_flow_item_ipv4 *)
2913                                          (items->spec))->hdr.next_proto_id;
2914                                 next_protocol &=
2915                                         ((const struct rte_flow_item_ipv4 *)
2916                                          (items->mask))->hdr.next_proto_id;
2917                         } else {
2918                                 /* Reset for inner layer. */
2919                                 next_protocol = 0xff;
2920                         }
2921                         mlx5_flow_tunnel_ip_check(items, &last_item);
2922                         break;
2923                 case RTE_FLOW_ITEM_TYPE_IPV6:
2924                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2925                                                            NULL, error);
2926                         if (ret < 0)
2927                                 return ret;
2928                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2929                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2930                         if (items->mask != NULL &&
2931                             ((const struct rte_flow_item_ipv6 *)
2932                              items->mask)->hdr.proto) {
2933                                 next_protocol =
2934                                         ((const struct rte_flow_item_ipv6 *)
2935                                          items->spec)->hdr.proto;
2936                                 next_protocol &=
2937                                         ((const struct rte_flow_item_ipv6 *)
2938                                          items->mask)->hdr.proto;
2939                         } else {
2940                                 /* Reset for inner layer. */
2941                                 next_protocol = 0xff;
2942                         }
2943                         mlx5_flow_tunnel_ip_check(items, &last_item);
2944                         break;
2945                 case RTE_FLOW_ITEM_TYPE_TCP:
2946                         ret = mlx5_flow_validate_item_tcp
2947                                                 (items, item_flags,
2948                                                  next_protocol,
2949                                                  &nic_tcp_mask,
2950                                                  error);
2951                         if (ret < 0)
2952                                 return ret;
2953                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2954                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2955                         break;
2956                 case RTE_FLOW_ITEM_TYPE_UDP:
2957                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2958                                                           next_protocol,
2959                                                           error);
2960                         if (ret < 0)
2961                                 return ret;
2962                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2963                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2964                         break;
2965                 case RTE_FLOW_ITEM_TYPE_GRE:
2966                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2967                                                           next_protocol, error);
2968                         if (ret < 0)
2969                                 return ret;
2970                         gre_item = items;
2971                         last_item = MLX5_FLOW_LAYER_GRE;
2972                         break;
2973                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2974                         ret = mlx5_flow_validate_item_nvgre(items, item_flags,
2975                                                             next_protocol,
2976                                                             error);
2977                         if (ret < 0)
2978                                 return ret;
2979                         last_item = MLX5_FLOW_LAYER_NVGRE;
2980                         break;
2981                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2982                         ret = mlx5_flow_validate_item_gre_key
2983                                 (items, item_flags, gre_item, error);
2984                         if (ret < 0)
2985                                 return ret;
2986                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
2987                         break;
2988                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2989                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2990                                                             error);
2991                         if (ret < 0)
2992                                 return ret;
2993                         last_item = MLX5_FLOW_LAYER_VXLAN;
2994                         break;
2995                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2996                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2997                                                                 item_flags, dev,
2998                                                                 error);
2999                         if (ret < 0)
3000                                 return ret;
3001                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3002                         break;
3003                 case RTE_FLOW_ITEM_TYPE_MPLS:
3004                         ret = mlx5_flow_validate_item_mpls(dev, items,
3005                                                            item_flags,
3006                                                            last_item, error);
3007                         if (ret < 0)
3008                                 return ret;
3009                         last_item = MLX5_FLOW_LAYER_MPLS;
3010                         break;
3011                 case RTE_FLOW_ITEM_TYPE_META:
3012                         ret = flow_dv_validate_item_meta(dev, items, attr,
3013                                                          error);
3014                         if (ret < 0)
3015                                 return ret;
3016                         last_item = MLX5_FLOW_ITEM_METADATA;
3017                         break;
3018                 case RTE_FLOW_ITEM_TYPE_ICMP:
3019                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3020                                                            next_protocol,
3021                                                            error);
3022                         if (ret < 0)
3023                                 return ret;
3024                         last_item = MLX5_FLOW_LAYER_ICMP;
3025                         break;
3026                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3027                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3028                                                             next_protocol,
3029                                                             error);
3030                         if (ret < 0)
3031                                 return ret;
3032                         last_item = MLX5_FLOW_LAYER_ICMP6;
3033                         break;
3034                 default:
3035                         return rte_flow_error_set(error, ENOTSUP,
3036                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3037                                                   NULL, "item not supported");
3038                 }
3039                 item_flags |= last_item;
3040         }
3041         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3042                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3043                         return rte_flow_error_set(error, ENOTSUP,
3044                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3045                                                   actions, "too many actions");
3046                 switch (actions->type) {
3047                 case RTE_FLOW_ACTION_TYPE_VOID:
3048                         break;
3049                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3050                         ret = flow_dv_validate_action_port_id(dev,
3051                                                               action_flags,
3052                                                               actions,
3053                                                               attr,
3054                                                               error);
3055                         if (ret)
3056                                 return ret;
3057                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3058                         ++actions_n;
3059                         break;
3060                 case RTE_FLOW_ACTION_TYPE_FLAG:
3061                         ret = mlx5_flow_validate_action_flag(action_flags,
3062                                                              attr, error);
3063                         if (ret < 0)
3064                                 return ret;
3065                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3066                         ++actions_n;
3067                         break;
3068                 case RTE_FLOW_ACTION_TYPE_MARK:
3069                         ret = mlx5_flow_validate_action_mark(actions,
3070                                                              action_flags,
3071                                                              attr, error);
3072                         if (ret < 0)
3073                                 return ret;
3074                         action_flags |= MLX5_FLOW_ACTION_MARK;
3075                         ++actions_n;
3076                         break;
3077                 case RTE_FLOW_ACTION_TYPE_DROP:
3078                         ret = mlx5_flow_validate_action_drop(action_flags,
3079                                                              attr, error);
3080                         if (ret < 0)
3081                                 return ret;
3082                         action_flags |= MLX5_FLOW_ACTION_DROP;
3083                         ++actions_n;
3084                         break;
3085                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3086                         ret = mlx5_flow_validate_action_queue(actions,
3087                                                               action_flags, dev,
3088                                                               attr, error);
3089                         if (ret < 0)
3090                                 return ret;
3091                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3092                         ++actions_n;
3093                         break;
3094                 case RTE_FLOW_ACTION_TYPE_RSS:
3095                         ret = mlx5_flow_validate_action_rss(actions,
3096                                                             action_flags, dev,
3097                                                             attr, item_flags,
3098                                                             error);
3099                         if (ret < 0)
3100                                 return ret;
3101                         action_flags |= MLX5_FLOW_ACTION_RSS;
3102                         ++actions_n;
3103                         break;
3104                 case RTE_FLOW_ACTION_TYPE_COUNT:
3105                         ret = flow_dv_validate_action_count(dev, error);
3106                         if (ret < 0)
3107                                 return ret;
3108                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3109                         ++actions_n;
3110                         break;
3111                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3112                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3113                         ret = flow_dv_validate_action_l2_encap(action_flags,
3114                                                                actions, attr,
3115                                                                error);
3116                         if (ret < 0)
3117                                 return ret;
3118                         action_flags |= actions->type ==
3119                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3120                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3121                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3122                         ++actions_n;
3123                         break;
3124                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3125                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3126                         ret = flow_dv_validate_action_l2_decap(action_flags,
3127                                                                attr, error);
3128                         if (ret < 0)
3129                                 return ret;
3130                         action_flags |= actions->type ==
3131                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3132                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3133                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3134                         ++actions_n;
3135                         break;
3136                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3137                         ret = flow_dv_validate_action_raw_encap(action_flags,
3138                                                                 actions, attr,
3139                                                                 error);
3140                         if (ret < 0)
3141                                 return ret;
3142                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3143                         ++actions_n;
3144                         break;
3145                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3146                         ret = flow_dv_validate_action_raw_decap(action_flags,
3147                                                                 actions, attr,
3148                                                                 error);
3149                         if (ret < 0)
3150                                 return ret;
3151                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3152                         ++actions_n;
3153                         break;
3154                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3155                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3156                         ret = flow_dv_validate_action_modify_mac(action_flags,
3157                                                                  actions,
3158                                                                  item_flags,
3159                                                                  error);
3160                         if (ret < 0)
3161                                 return ret;
3162                         /* Count all modify-header actions as one action. */
3163                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3164                                 ++actions_n;
3165                         action_flags |= actions->type ==
3166                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3167                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3168                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3169                         break;
3170
3171                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3172                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3173                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3174                                                                   actions,
3175                                                                   item_flags,
3176                                                                   error);
3177                         if (ret < 0)
3178                                 return ret;
3179                         /* Count all modify-header actions as one action. */
3180                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3181                                 ++actions_n;
3182                         action_flags |= actions->type ==
3183                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3184                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3185                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3186                         break;
3187                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3188                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3189                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3190                                                                   actions,
3191                                                                   item_flags,
3192                                                                   error);
3193                         if (ret < 0)
3194                                 return ret;
3195                         /* Count all modify-header actions as one action. */
3196                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3197                                 ++actions_n;
3198                         action_flags |= actions->type ==
3199                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3200                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3201                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3202                         break;
3203                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3204                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3205                         ret = flow_dv_validate_action_modify_tp(action_flags,
3206                                                                 actions,
3207                                                                 item_flags,
3208                                                                 error);
3209                         if (ret < 0)
3210                                 return ret;
3211                         /* Count all modify-header actions as one action. */
3212                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3213                                 ++actions_n;
3214                         action_flags |= actions->type ==
3215                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3216                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3217                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3218                         break;
3219                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3220                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3221                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3222                                                                  actions,
3223                                                                  item_flags,
3224                                                                  error);
3225                         if (ret < 0)
3226                                 return ret;
3227                         /* Count all modify-header actions as one action. */
3228                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3229                                 ++actions_n;
3230                         action_flags |= actions->type ==
3231                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3232                                                 MLX5_FLOW_ACTION_SET_TTL :
3233                                                 MLX5_FLOW_ACTION_DEC_TTL;
3234                         break;
3235                 case RTE_FLOW_ACTION_TYPE_JUMP:
3236                         ret = flow_dv_validate_action_jump(actions,
3237                                                            attr->group, error);
3238                         if (ret)
3239                                 return ret;
3240                         ++actions_n;
3241                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3242                         break;
3243                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3244                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3245                         ret = flow_dv_validate_action_modify_tcp_seq
3246                                                                 (action_flags,
3247                                                                  actions,
3248                                                                  item_flags,
3249                                                                  error);
3250                         if (ret < 0)
3251                                 return ret;
3252                         /* Count all modify-header actions as one action. */
3253                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3254                                 ++actions_n;
3255                         action_flags |= actions->type ==
3256                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3257                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3258                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3259                         break;
3260                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3261                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3262                         ret = flow_dv_validate_action_modify_tcp_ack
3263                                                                 (action_flags,
3264                                                                  actions,
3265                                                                  item_flags,
3266                                                                  error);
3267                         if (ret < 0)
3268                                 return ret;
3269                         /* Count all modify-header actions as one action. */
3270                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3271                                 ++actions_n;
3272                         action_flags |= actions->type ==
3273                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3274                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3275                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3276                         break;
3277                 default:
3278                         return rte_flow_error_set(error, ENOTSUP,
3279                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3280                                                   actions,
3281                                                   "action not supported");
3282                 }
3283         }
3284         /* E-Switch has a few restrictions on using items and actions. */
3285         if (attr->transfer) {
3286                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3287                         return rte_flow_error_set(error, ENOTSUP,
3288                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3289                                                   NULL,
3290                                                   "unsupported action FLAG");
3291                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3292                         return rte_flow_error_set(error, ENOTSUP,
3293                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3294                                                   NULL,
3295                                                   "unsupported action MARK");
3296                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3297                         return rte_flow_error_set(error, ENOTSUP,
3298                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3299                                                   NULL,
3300                                                   "unsupported action QUEUE");
3301                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3302                         return rte_flow_error_set(error, ENOTSUP,
3303                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3304                                                   NULL,
3305                                                   "unsupported action RSS");
3306                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3307                         return rte_flow_error_set(error, EINVAL,
3308                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3309                                                   actions,
3310                                                   "no fate action is found");
3311         } else {
3312                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3313                         return rte_flow_error_set(error, EINVAL,
3314                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3315                                                   actions,
3316                                                   "no fate action is found");
3317         }
3318         return 0;
3319 }
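
/*
 * Illustrative sketch, for exposition only (not part of the driver):
 * validating an ingress eth/ipv4/udp pattern with count + drop actions.
 * Item specs and masks are left NULL, so only layer ordering is checked.
 */
static __rte_unused int
flow_dv_validate_sketch(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return flow_dv_validate(dev, &attr, items, actions, error);
}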
3320
3321 /**
3322  * Internal preparation function. Allocates the DV flow object;
3323  * its size is constant.
3324  *
3325  * @param[in] attr
3326  *   Pointer to the flow attributes.
3327  * @param[in] items
3328  *   Pointer to the list of items.
3329  * @param[in] actions
3330  *   Pointer to the list of actions.
3331  * @param[out] error
3332  *   Pointer to the error structure.
3333  *
3334  * @return
3335  *   Pointer to mlx5_flow object on success,
3336  *   otherwise NULL and rte_errno is set.
3337  */
3338 static struct mlx5_flow *
3339 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3340                 const struct rte_flow_item items[] __rte_unused,
3341                 const struct rte_flow_action actions[] __rte_unused,
3342                 struct rte_flow_error *error)
3343 {
3344         uint32_t size = sizeof(struct mlx5_flow);
3345         struct mlx5_flow *flow;
3346
3347         flow = rte_calloc(__func__, 1, size, 0);
3348         if (!flow) {
3349                 rte_flow_error_set(error, ENOMEM,
3350                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3351                                    "not enough memory to create flow");
3352                 return NULL;
3353         }
3354         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3355         return flow;
3356 }
3357
3358 #ifndef NDEBUG
3359 /**
3360  * Sanity check for match mask and value. Similar to check_valid_spec() in
3361  * the kernel driver. An unmasked bit present in the value is a failure.
3362  *
3363  * @param match_mask
3364  *   pointer to match mask buffer.
3365  * @param match_value
3366  *   pointer to match value buffer.
3367  *
3368  * @return
3369  *   0 if valid, -EINVAL otherwise.
3370  */
3371 static int
3372 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3373 {
3374         uint8_t *m = match_mask;
3375         uint8_t *v = match_value;
3376         unsigned int i;
3377
3378         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3379                 if (v[i] & ~m[i]) {
3380                         DRV_LOG(ERR,
3381                                 "match_value differs from match_criteria"
3382                                 " %p[%u] != %p[%u]",
3383                                 match_value, i, match_mask, i);
3384                         return -EINVAL;
3385                 }
3386         }
3387         return 0;
3388 }
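
/*
 * Illustrative micro-example, for exposition only (not part of the
 * driver): a value bit outside its mask makes the check fail, e.g.
 * mask byte 0x0f with value byte 0x1f (0x1f & ~0x0f == 0x10).
 */
static __rte_unused void
flow_dv_check_valid_spec_sketch(void)
{
        uint8_t mask[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x0f };
        uint8_t value[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0x1f };

        /* Returns -EINVAL: byte 0 of the value has unmasked bit 0x10. */
        (void)flow_dv_check_valid_spec(mask, value);
}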
3389 #endif
3390
3391 /**
3392  * Add Ethernet item to matcher and to the value.
3393  *
3394  * @param[in, out] matcher
3395  *   Flow matcher.
3396  * @param[in, out] key
3397  *   Flow matcher value.
3398  * @param[in] item
3399  *   Flow pattern to translate.
3400  * @param[in] inner
3401  *   Item is inner pattern.
3402  */
3403 static void
3404 flow_dv_translate_item_eth(void *matcher, void *key,
3405                            const struct rte_flow_item *item, int inner)
3406 {
3407         const struct rte_flow_item_eth *eth_m = item->mask;
3408         const struct rte_flow_item_eth *eth_v = item->spec;
3409         const struct rte_flow_item_eth nic_mask = {
3410                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3411                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3412                 .type = RTE_BE16(0xffff),
3413         };
3414         void *headers_m;
3415         void *headers_v;
3416         char *l24_v;
3417         unsigned int i;
3418
3419         if (!eth_v)
3420                 return;
3421         if (!eth_m)
3422                 eth_m = &nic_mask;
3423         if (inner) {
3424                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3425                                          inner_headers);
3426                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3427         } else {
3428                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3429                                          outer_headers);
3430                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3431         }
3432         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
3433                &eth_m->dst, sizeof(eth_m->dst));
3434         /* The value must be in the range of the mask. */
3435         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
3436         for (i = 0; i < sizeof(eth_m->dst); ++i)
3437                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
3438         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
3439                &eth_m->src, sizeof(eth_m->src));
3440         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
3441         /* The value must be in the range of the mask. */
3442         for (i = 0; i < sizeof(eth_m->src); ++i)
3443                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
3444         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3445                  rte_be_to_cpu_16(eth_m->type));
3446         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
3447         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
3448 }
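
/*
 * Illustrative sketch, for exposition only (not part of the driver):
 * translating an outer Ethernet item into a matcher/value buffer pair.
 * The MAC address is example data; unmasked value bits are cleared by
 * the "value & mask" logic above.
 */
static __rte_unused void
flow_dv_translate_item_eth_sketch(void)
{
        uint8_t matcher[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        uint8_t key[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        const struct rte_flow_item_eth spec = {
                .dst.addr_bytes = "\x00\x11\x22\x33\x44\x55",
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_ETH,
                .spec = &spec,
                .mask = &rte_flow_item_eth_mask,
        };

        flow_dv_translate_item_eth(matcher, key, &item, 0);
}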
3449
3450 /**
3451  * Add VLAN item to matcher and to the value.
3452  *
3453  * @param[in, out] dev_flow
3454  *   Flow descriptor.
3455  * @param[in, out] matcher
3456  *   Flow matcher.
3457  * @param[in, out] key
3458  *   Flow matcher value.
3459  * @param[in] item
3460  *   Flow pattern to translate.
3461  * @param[in] inner
3462  *   Item is inner pattern.
3463  */
3464 static void
3465 flow_dv_translate_item_vlan(struct mlx5_flow *dev_flow,
3466                             void *matcher, void *key,
3467                             const struct rte_flow_item *item,
3468                             int inner)
3469 {
3470         const struct rte_flow_item_vlan *vlan_m = item->mask;
3471         const struct rte_flow_item_vlan *vlan_v = item->spec;
3472         void *headers_m;
3473         void *headers_v;
3474         uint16_t tci_m;
3475         uint16_t tci_v;
3476
3477         if (!vlan_v)
3478                 return;
3479         if (!vlan_m)
3480                 vlan_m = &rte_flow_item_vlan_mask;
3481         if (inner) {
3482                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3483                                          inner_headers);
3484                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3485         } else {
3486                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3487                                          outer_headers);
3488                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3489                 /*
3490                  * This is a workaround; masks are not supported here
3491                  * and were pre-validated.
3492                  */
3493                 dev_flow->dv.vf_vlan.tag =
3494                         rte_be_to_cpu_16(vlan_v->tci) & 0x0fff;
3495         }
3496         tci_m = rte_be_to_cpu_16(vlan_m->tci);
3497         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
3498         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
3499         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
3500         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
3501         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
3502         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
3503         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
3504         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
3505         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
3506         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3507                  rte_be_to_cpu_16(vlan_m->inner_type));
3508         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype,
3509                  rte_be_to_cpu_16(vlan_m->inner_type & vlan_v->inner_type));
3510 }
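
/*
 * Worked example, for exposition only (not part of the driver): for
 * TCI 0xa07b (PCP 5, CFI 0, VID 0x07b) the shifts above give
 *   first_vid  = 0xa07b, truncated by MLX5_SET to 12 bits = 0x07b,
 *   first_cfi  = 0xa07b >> 12 = 0xa, truncated to 1 bit  = 0,
 *   first_prio = 0xa07b >> 13 = 0x5.
 */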
3511
3512 /**
3513  * Add IPV4 item to matcher and to the value.
3514  *
3515  * @param[in, out] matcher
3516  *   Flow matcher.
3517  * @param[in, out] key
3518  *   Flow matcher value.
3519  * @param[in] item
3520  *   Flow pattern to translate.
3521  * @param[in] inner
3522  *   Item is inner pattern.
3523  * @param[in] group
3524  *   The group to insert the rule.
3525  */
3526 static void
3527 flow_dv_translate_item_ipv4(void *matcher, void *key,
3528                             const struct rte_flow_item *item,
3529                             int inner, uint32_t group)
3530 {
3531         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
3532         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
3533         const struct rte_flow_item_ipv4 nic_mask = {
3534                 .hdr = {
3535                         .src_addr = RTE_BE32(0xffffffff),
3536                         .dst_addr = RTE_BE32(0xffffffff),
3537                         .type_of_service = 0xff,
3538                         .next_proto_id = 0xff,
3539                 },
3540         };
3541         void *headers_m;
3542         void *headers_v;
3543         char *l24_m;
3544         char *l24_v;
3545         uint8_t tos;
3546
3547         if (inner) {
3548                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3549                                          inner_headers);
3550                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3551         } else {
3552                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3553                                          outer_headers);
3554                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3555         }
3556         if (group == 0)
3557                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3558         else
3559                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
3560         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
3561         if (!ipv4_v)
3562                 return;
3563         if (!ipv4_m)
3564                 ipv4_m = &nic_mask;
3565         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3566                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3567         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3568                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3569         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
3570         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
3571         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3572                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
3573         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3574                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
3575         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
3576         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
3577         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
3578         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
3579                  ipv4_m->hdr.type_of_service);
3580         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
3581         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
3582                  ipv4_m->hdr.type_of_service >> 2);
3583         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
3584         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3585                  ipv4_m->hdr.next_proto_id);
3586         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3587                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
3588 }
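
/*
 * Illustrative sketch, for exposition only (not part of the driver):
 * matching an outer IPv4 destination subnet in a non-root group
 * (group 1), so the ip_version mask above is 0x4 rather than 0xf.
 * Addresses are example data.
 */
static __rte_unused void
flow_dv_translate_item_ipv4_sketch(void)
{
        uint8_t matcher[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        uint8_t key[MLX5_ST_SZ_BYTES(fte_match_param)] = { 0 };
        const struct rte_flow_item_ipv4 spec = {
                .hdr = { .dst_addr = RTE_BE32(0xc0a80000) }, /* 192.168.0.0 */
        };
        const struct rte_flow_item_ipv4 mask = {
                .hdr = { .dst_addr = RTE_BE32(0xffff0000) }, /* /16 prefix */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &spec,
                .mask = &mask,
        };

        flow_dv_translate_item_ipv4(matcher, key, &item, 0, 1);
}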
3589
3590 /**
3591  * Add IPV6 item to matcher and to the value.
3592  *
3593  * @param[in, out] matcher
3594  *   Flow matcher.
3595  * @param[in, out] key
3596  *   Flow matcher value.
3597  * @param[in] item
3598  *   Flow pattern to translate.
3599  * @param[in] inner
3600  *   Item is inner pattern.
3601  * @param[in] group
3602  *   The group to insert the rule.
3603  */
3604 static void
3605 flow_dv_translate_item_ipv6(void *matcher, void *key,
3606                             const struct rte_flow_item *item,
3607                             int inner, uint32_t group)
3608 {
3609         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
3610         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
3611         const struct rte_flow_item_ipv6 nic_mask = {
3612                 .hdr = {
3613                         .src_addr =
3614                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
3615                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
3616                         .dst_addr =
3617                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
3618                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
3619                         .vtc_flow = RTE_BE32(0xffffffff),
3620                         .proto = 0xff,
3621                         .hop_limits = 0xff,
3622                 },
3623         };
3624         void *headers_m;
3625         void *headers_v;
3626         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3627         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3628         char *l24_m;
3629         char *l24_v;
3630         uint32_t vtc_m;
3631         uint32_t vtc_v;
3632         int i;
3633         int size;
3634
3635         if (inner) {
3636                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3637                                          inner_headers);
3638                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3639         } else {
3640                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3641                                          outer_headers);
3642                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3643         }
3644         if (group == 0)
3645                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3646         else
3647                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
3648         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
3649         if (!ipv6_v)
3650                 return;
3651         if (!ipv6_m)
3652                 ipv6_m = &nic_mask;
3653         size = sizeof(ipv6_m->hdr.dst_addr);
3654         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3655                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3656         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3657                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3658         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
3659         for (i = 0; i < size; ++i)
3660                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
3661         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3662                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
3663         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3664                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
3665         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
3666         for (i = 0; i < size; ++i)
3667                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
3668         /* TOS. */
3669         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
3670         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
3671         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
3672         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
3673         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
3674         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
3675         /* Label. */
3676         if (inner) {
3677                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
3678                          vtc_m);
3679                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
3680                          vtc_v);
3681         } else {
3682                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
3683                          vtc_m);
3684                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
3685                          vtc_v);
3686         }
3687         /* Protocol. */
3688         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3689                  ipv6_m->hdr.proto);
3690         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3691                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
3692 }
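
/*
 * Worked example, for exposition only (not part of the driver):
 * vtc_flow packs version(4) | traffic class(8) | flow label(20).
 * For version 6 and traffic class 0xb8 the host-order word is
 * 0x6b800000, so
 *   ip_ecn  = 0x6b800000 >> 20, truncated by MLX5_SET to 2 bits = 0,
 *   ip_dscp = 0x6b800000 >> 22, truncated to 6 bits = 0x2e (DSCP 46).
 */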
3693
3694 /**
3695  * Add TCP item to matcher and to the value.
3696  *
3697  * @param[in, out] matcher
3698  *   Flow matcher.
3699  * @param[in, out] key
3700  *   Flow matcher value.
3701  * @param[in] item
3702  *   Flow pattern to translate.
3703  * @param[in] inner
3704  *   Item is inner pattern.
3705  */
3706 static void
3707 flow_dv_translate_item_tcp(void *matcher, void *key,
3708                            const struct rte_flow_item *item,
3709                            int inner)
3710 {
3711         const struct rte_flow_item_tcp *tcp_m = item->mask;
3712         const struct rte_flow_item_tcp *tcp_v = item->spec;
3713         void *headers_m;
3714         void *headers_v;
3715
3716         if (inner) {
3717                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3718                                          inner_headers);
3719                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3720         } else {
3721                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3722                                          outer_headers);
3723                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3724         }
3725         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3726         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3727         if (!tcp_v)
3728                 return;
3729         if (!tcp_m)
3730                 tcp_m = &rte_flow_item_tcp_mask;
3731         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3732                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
3733         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3734                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3735         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3736                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3737         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3738                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3739         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3740                  tcp_m->hdr.tcp_flags);
3741         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3742                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3743 }
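
/*
 * Usage sketch (illustrative only, kept out of the build): translating
 * an rte_flow TCP item matching destination port 80 into an existing
 * matcher/key pair. The helper name is an assumption for the example,
 * not driver API.
 */
#if 0
static void
example_translate_tcp_dport(void *matcher, void *key)
{
        static const struct rte_flow_item_tcp tcp_spec = {
                .hdr = { .dst_port = RTE_BE16(80) },
        };
        static const struct rte_flow_item_tcp tcp_mask = {
                .hdr = { .dst_port = RTE_BE16(0xffff) },
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_TCP,
                .spec = &tcp_spec,
                .mask = &tcp_mask,
        };

        /* Outer TCP header, hence inner == 0. */
        flow_dv_translate_item_tcp(matcher, key, &item, 0);
}
#endif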
3744
3745 /**
3746  * Add UDP item to matcher and to the value.
3747  *
3748  * @param[in, out] matcher
3749  *   Flow matcher.
3750  * @param[in, out] key
3751  *   Flow matcher value.
3752  * @param[in] item
3753  *   Flow pattern to translate.
3754  * @param[in] inner
3755  *   Item is inner pattern.
3756  */
3757 static void
3758 flow_dv_translate_item_udp(void *matcher, void *key,
3759                            const struct rte_flow_item *item,
3760                            int inner)
3761 {
3762         const struct rte_flow_item_udp *udp_m = item->mask;
3763         const struct rte_flow_item_udp *udp_v = item->spec;
3764         void *headers_m;
3765         void *headers_v;
3766
3767         if (inner) {
3768                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3769                                          inner_headers);
3770                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3771         } else {
3772                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3773                                          outer_headers);
3774                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3775         }
3776         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3777         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3778         if (!udp_v)
3779                 return;
3780         if (!udp_m)
3781                 udp_m = &rte_flow_item_udp_mask;
3782         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3783                  rte_be_to_cpu_16(udp_m->hdr.src_port));
3784         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3785                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3786         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3787                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
3788         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3789                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3790 }
3791
3792 /**
3793  * Add GRE optional Key item to matcher and to the value.
3794  *
3795  * @param[in, out] matcher
3796  *   Flow matcher.
3797  * @param[in, out] key
3798  *   Flow matcher value.
3799  * @param[in] item
3800  *   Flow pattern to translate.
3803  */
3804 static void
3805 flow_dv_translate_item_gre_key(void *matcher, void *key,
3806                                const struct rte_flow_item *item)
3807 {
3808         const rte_be32_t *key_m = item->mask;
3809         const rte_be32_t *key_v = item->spec;
3810         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3811         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3812         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3813
3814         if (!key_v)
3815                 return;
3816         if (!key_m)
3817                 key_m = &gre_key_default_mask;
3818         /* GRE K bit must be on and should already be validated. */
3819         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3820         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3821         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3822                  rte_be_to_cpu_32(*key_m) >> 8);
3823         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3824                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3825         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3826                  rte_be_to_cpu_32(*key_m) & 0xFF);
3827         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3828                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3829 }
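
/*
 * Worked example (sketch only, not compiled): the translator above
 * splits the 32-bit GRE key into the 24-bit gre_key_h and the 8-bit
 * gre_key_l fields. For key 0x00ABCDEF under a full mask:
 *   gre_key_h = 0x00ABCDEF >> 8   = 0x00ABCD
 *   gre_key_l = 0x00ABCDEF & 0xFF = 0xEF
 */
#if 0
static void
example_translate_gre_key(void *matcher, void *key)
{
        static const rte_be32_t gre_key = RTE_BE32(0x00ABCDEF);
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_GRE_KEY,
                .spec = &gre_key,
                .mask = NULL, /* falls back to the full 0xFFFFFFFF mask */
        };

        flow_dv_translate_item_gre_key(matcher, key, &item);
}
#endif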
3830
3831 /**
3832  * Add GRE item to matcher and to the value.
3833  *
3834  * @param[in, out] matcher
3835  *   Flow matcher.
3836  * @param[in, out] key
3837  *   Flow matcher value.
3838  * @param[in] item
3839  *   Flow pattern to translate.
3840  * @param[in] inner
3841  *   Item is inner pattern.
3842  */
3843 static void
3844 flow_dv_translate_item_gre(void *matcher, void *key,
3845                            const struct rte_flow_item *item,
3846                            int inner)
3847 {
3848         const struct rte_flow_item_gre *gre_m = item->mask;
3849         const struct rte_flow_item_gre *gre_v = item->spec;
3850         void *headers_m;
3851         void *headers_v;
3852         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3853         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3854         struct {
3855                 union {
3856                         __extension__
3857                         struct {
3858                                 uint16_t version:3;
3859                                 uint16_t rsvd0:9;
3860                                 uint16_t s_present:1;
3861                                 uint16_t k_present:1;
3862                                 uint16_t rsvd_bit1:1;
3863                                 uint16_t c_present:1;
3864                         };
3865                         uint16_t value;
3866                 };
3867         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
3868
3869         if (inner) {
3870                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3871                                          inner_headers);
3872                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3873         } else {
3874                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3875                                          outer_headers);
3876                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3877         }
3878         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3879         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
3880         if (!gre_v)
3881                 return;
3882         if (!gre_m)
3883                 gre_m = &rte_flow_item_gre_mask;
3884         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3885                  rte_be_to_cpu_16(gre_m->protocol));
3886         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3887                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3888         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
3889         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
3890         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
3891                  gre_crks_rsvd0_ver_m.c_present);
3892         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
3893                  gre_crks_rsvd0_ver_v.c_present &
3894                  gre_crks_rsvd0_ver_m.c_present);
3895         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
3896                  gre_crks_rsvd0_ver_m.k_present);
3897         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
3898                  gre_crks_rsvd0_ver_v.k_present &
3899                  gre_crks_rsvd0_ver_m.k_present);
3900         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
3901                  gre_crks_rsvd0_ver_m.s_present);
3902         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
3903                  gre_crks_rsvd0_ver_v.s_present &
3904                  gre_crks_rsvd0_ver_m.s_present);
3905 }
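
/*
 * Worked example (sketch only): c_rsvd0_ver = RTE_BE16(0x2000), the
 * NVGRE default used below, decodes through the bit-field union above
 * as c_present = 0, k_present = 1, s_present = 0 and version = 0,
 * i.e. a GRE header carrying only the key field.
 */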
3906
3907 /**
3908  * Add NVGRE item to matcher and to the value.
3909  *
3910  * @param[in, out] matcher
3911  *   Flow matcher.
3912  * @param[in, out] key
3913  *   Flow matcher value.
3914  * @param[in] item
3915  *   Flow pattern to translate.
3916  * @param[in] inner
3917  *   Item is inner pattern.
3918  */
3919 static void
3920 flow_dv_translate_item_nvgre(void *matcher, void *key,
3921                              const struct rte_flow_item *item,
3922                              int inner)
3923 {
3924         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3925         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3926         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3927         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3928         const char *tni_flow_id_m;
3929         const char *tni_flow_id_v;
3930         char *gre_key_m;
3931         char *gre_key_v;
3932         int size;
3933         int i;
3934
3935         /* For NVGRE, GRE header fields must be set with defined values. */
3936         const struct rte_flow_item_gre gre_spec = {
3937                 .c_rsvd0_ver = RTE_BE16(0x2000),
3938                 .protocol = RTE_BE16(RTE_ETHER_TYPE_TEB)
3939         };
3940         const struct rte_flow_item_gre gre_mask = {
3941                 .c_rsvd0_ver = RTE_BE16(0xB000),
3942                 .protocol = RTE_BE16(UINT16_MAX),
3943         };
3944         const struct rte_flow_item gre_item = {
3945                 .spec = &gre_spec,
3946                 .mask = &gre_mask,
3947                 .last = NULL,
3948         };
3949         flow_dv_translate_item_gre(matcher, key, &gre_item, inner);
3950         if (!nvgre_v)
3951                 return;
3952         if (!nvgre_m)
3953                 nvgre_m = &rte_flow_item_nvgre_mask;
             /* Take the TNI pointers only after the default mask is set. */
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
3954         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3955         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3956         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3957         memcpy(gre_key_m, tni_flow_id_m, size);
3958         for (i = 0; i < size; ++i)
3959                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3960 }
3961
3962 /**
3963  * Add VXLAN item to matcher and to the value.
3964  *
3965  * @param[in, out] matcher
3966  *   Flow matcher.
3967  * @param[in, out] key
3968  *   Flow matcher value.
3969  * @param[in] item
3970  *   Flow pattern to translate.
3971  * @param[in] inner
3972  *   Item is inner pattern.
3973  */
3974 static void
3975 flow_dv_translate_item_vxlan(void *matcher, void *key,
3976                              const struct rte_flow_item *item,
3977                              int inner)
3978 {
3979         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3980         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3981         void *headers_m;
3982         void *headers_v;
3983         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3984         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3985         char *vni_m;
3986         char *vni_v;
3987         uint16_t dport;
3988         int size;
3989         int i;
3990
3991         if (inner) {
3992                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3993                                          inner_headers);
3994                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3995         } else {
3996                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3997                                          outer_headers);
3998                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3999         }
4000         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
4001                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
4002         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
4003                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
4004                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
4005         }
4006         if (!vxlan_v)
4007                 return;
4008         if (!vxlan_m)
4009                 vxlan_m = &rte_flow_item_vxlan_mask;
4010         size = sizeof(vxlan_m->vni);
4011         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
4012         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
4013         memcpy(vni_m, vxlan_m->vni, size);
4014         for (i = 0; i < size; ++i)
4015                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
4016 }
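
/*
 * Worked example (sketch only, not compiled): matching VNI 0x123456
 * under the partial mask 0xFFFF00 goes byte-wise through the loop
 * above:
 *   vni_m[] = {0xFF, 0xFF, 0x00}
 *   spec    = {0x12, 0x34, 0x56}
 *   vni_v[] = {0x12, 0x34, 0x00}   (vni_v[i] = vni_m[i] & spec[i])
 */
#if 0
static void
example_translate_vxlan_vni(void *matcher, void *key)
{
        static const struct rte_flow_item_vxlan vxlan_spec = {
                .vni = { 0x12, 0x34, 0x56 },
        };
        static const struct rte_flow_item_vxlan vxlan_mask = {
                .vni = { 0xFF, 0xFF, 0x00 },
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN,
                .spec = &vxlan_spec,
                .mask = &vxlan_mask,
        };

        flow_dv_translate_item_vxlan(matcher, key, &item, 0);
}
#endif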
4017
4018 /**
4019  * Add MPLS item to matcher and to the value.
4020  *
4021  * @param[in, out] matcher
4022  *   Flow matcher.
4023  * @param[in, out] key
4024  *   Flow matcher value.
4025  * @param[in] item
4026  *   Flow pattern to translate.
4027  * @param[in] prev_layer
4028  *   The protocol layer indicated by the previous item.
4029  * @param[in] inner
4030  *   Item is inner pattern.
4031  */
4032 static void
4033 flow_dv_translate_item_mpls(void *matcher, void *key,
4034                             const struct rte_flow_item *item,
4035                             uint64_t prev_layer,
4036                             int inner)
4037 {
4038         const uint32_t *in_mpls_m = item->mask;
4039         const uint32_t *in_mpls_v = item->spec;
4040         uint32_t *out_mpls_m = NULL;
4041         uint32_t *out_mpls_v = NULL;
4042         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4043         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4044         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4045                                      misc_parameters_2);
4046         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4047         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4048         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4049
4050         switch (prev_layer) {
4051         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4052                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4053                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4054                          MLX5_UDP_PORT_MPLS);
4055                 break;
4056         case MLX5_FLOW_LAYER_GRE:
4057                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4058                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4059                          RTE_ETHER_TYPE_MPLS);
4060                 break;
4061         default:
4062                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4063                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4064                          IPPROTO_MPLS);
4065                 break;
4066         }
4067         if (!in_mpls_v)
4068                 return;
4069         if (!in_mpls_m)
4070                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4071         switch (prev_layer) {
4072         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4073                 out_mpls_m =
4074                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4075                                                  outer_first_mpls_over_udp);
4076                 out_mpls_v =
4077                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4078                                                  outer_first_mpls_over_udp);
4079                 break;
4080         case MLX5_FLOW_LAYER_GRE:
4081                 out_mpls_m =
4082                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4083                                                  outer_first_mpls_over_gre);
4084                 out_mpls_v =
4085                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4086                                                  outer_first_mpls_over_gre);
4087                 break;
4088         default:
4089                 /* Inner MPLS not over GRE is not supported. */
4090                 if (!inner) {
4091                         out_mpls_m =
4092                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4093                                                          misc2_m,
4094                                                          outer_first_mpls);
4095                         out_mpls_v =
4096                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4097                                                          misc2_v,
4098                                                          outer_first_mpls);
4099                 }
4100                 break;
4101         }
4102         if (out_mpls_m && out_mpls_v) {
4103                 *out_mpls_m = *in_mpls_m;
4104                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4105         }
4106 }
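
/*
 * Usage sketch (illustrative only, not compiled): an MPLS item that
 * follows an outer UDP header, so the translator above pins udp_dport
 * to MLX5_UDP_PORT_MPLS and writes the label word into
 * outer_first_mpls_over_udp. The label value is arbitrary.
 */
#if 0
static void
example_translate_mpls_over_udp(void *matcher, void *key)
{
        static const struct rte_flow_item_mpls mpls_spec = {
                .label_tc_s = { 0x00, 0x01, 0x01 }, /* label 16, S bit set */
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_MPLS,
                .spec = &mpls_spec,
                .mask = NULL, /* falls back to rte_flow_item_mpls_mask */
        };

        flow_dv_translate_item_mpls(matcher, key, &item,
                                    MLX5_FLOW_LAYER_OUTER_L4_UDP, 0);
}
#endif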
4107
4108 /**
4109  * Add META item to matcher and to the value.
4110  *
4111  * @param[in, out] matcher
4112  *   Flow matcher.
4113  * @param[in, out] key
4114  *   Flow matcher value.
4115  * @param[in] item
4116  *   Flow pattern to translate.
4119  */
4120 static void
4121 flow_dv_translate_item_meta(void *matcher, void *key,
4122                             const struct rte_flow_item *item)
4123 {
4124         const struct rte_flow_item_meta *meta_m;
4125         const struct rte_flow_item_meta *meta_v;
4126         void *misc2_m =
4127                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4128         void *misc2_v =
4129                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4130
4131         meta_m = (const void *)item->mask;
4132         if (!meta_m)
4133                 meta_m = &rte_flow_item_meta_mask;
4134         meta_v = (const void *)item->spec;
4135         if (meta_v) {
4136                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4137                          rte_be_to_cpu_32(meta_m->data));
4138                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4139                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
4140         }
4141 }
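
/*
 * Usage sketch (illustrative only, not compiled): META data and mask
 * travel as 32-bit big-endian values in the item and are converted to
 * CPU order before landing in metadata_reg_a above.
 */
#if 0
static void
example_translate_meta(void *matcher, void *key)
{
        static const struct rte_flow_item_meta meta_spec = {
                .data = RTE_BE32(0xcafe),
        };
        const struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_META,
                .spec = &meta_spec,
                .mask = NULL, /* falls back to rte_flow_item_meta_mask */
        };

        flow_dv_translate_item_meta(matcher, key, &item);
}
#endif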
4142
4143 /**
4144  * Add source vport match to the specified matcher.
4145  *
4146  * @param[in, out] matcher
4147  *   Flow matcher.
4148  * @param[in, out] key
4149  *   Flow matcher value.
4150  * @param[in] port
4151  *   Source vport value to match.
4152  * @param[in] mask
4153  *   Mask to apply on the source vport value.
4154  */
4155 static void
4156 flow_dv_translate_item_source_vport(void *matcher, void *key,
4157                                     int16_t port, uint16_t mask)
4158 {
4159         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4160         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4161
4162         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4163         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4164 }
4165
4166 /**
4167  * Translate port-id item to eswitch match on port-id.
4168  *
4169  * @param[in] dev
4170  *   The device to configure through.
4171  * @param[in, out] matcher
4172  *   Flow matcher.
4173  * @param[in, out] key
4174  *   Flow matcher value.
4175  * @param[in] item
4176  *   Flow pattern to translate.
4177  *
4178  * @return
4179  *   0 on success, a negative errno value otherwise.
4180  */
4181 static int
4182 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4183                                void *key, const struct rte_flow_item *item)
4184 {
4185         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4186         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4187         uint16_t mask, val, id;
4188         int ret;
4189
4190         mask = pid_m ? pid_m->id : 0xffff;
4191         id = pid_v ? pid_v->id : dev->data->port_id;
4192         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
4193         if (ret)
4194                 return ret;
4195         flow_dv_translate_item_source_vport(matcher, key, val, mask);
4196         return 0;
4197 }
4198
4199 /**
4200  * Add ICMP6 item to matcher and to the value.
4201  *
4202  * @param[in, out] matcher
4203  *   Flow matcher.
4204  * @param[in, out] key
4205  *   Flow matcher value.
4206  * @param[in] item
4207  *   Flow pattern to translate.
4208  * @param[in] inner
4209  *   Item is inner pattern.
4210  */
4211 static void
4212 flow_dv_translate_item_icmp6(void *matcher, void *key,
4213                               const struct rte_flow_item *item,
4214                               int inner)
4215 {
4216         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4217         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4218         void *headers_m;
4219         void *headers_v;
4220         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4221                                      misc_parameters_3);
4222         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4223         if (inner) {
4224                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4225                                          inner_headers);
4226                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4227         } else {
4228                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4229                                          outer_headers);
4230                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4231         }
4232         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4233         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
4234         if (!icmp6_v)
4235                 return;
4236         if (!icmp6_m)
4237                 icmp6_m = &rte_flow_item_icmp6_mask;
4238         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
4239         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
4240                  icmp6_v->type & icmp6_m->type);
4241         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
4242         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
4243                  icmp6_v->code & icmp6_m->code);
4244 }
4245
4246 /**
4247  * Add ICMP item to matcher and to the value.
4248  *
4249  * @param[in, out] matcher
4250  *   Flow matcher.
4251  * @param[in, out] key
4252  *   Flow matcher value.
4253  * @param[in] item
4254  *   Flow pattern to translate.
4255  * @param[in] inner
4256  *   Item is inner pattern.
4257  */
4258 static void
4259 flow_dv_translate_item_icmp(void *matcher, void *key,
4260                             const struct rte_flow_item *item,
4261                             int inner)
4262 {
4263         const struct rte_flow_item_icmp *icmp_m = item->mask;
4264         const struct rte_flow_item_icmp *icmp_v = item->spec;
4265         void *headers_m;
4266         void *headers_v;
4267         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4268                                      misc_parameters_3);
4269         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4270         if (inner) {
4271                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4272                                          inner_headers);
4273                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4274         } else {
4275                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4276                                          outer_headers);
4277                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4278         }
4279         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4280         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
4281         if (!icmp_v)
4282                 return;
4283         if (!icmp_m)
4284                 icmp_m = &rte_flow_item_icmp_mask;
4285         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
4286                  icmp_m->hdr.icmp_type);
4287         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
4288                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
4289         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
4290                  icmp_m->hdr.icmp_code);
4291         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
4292                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
4293 }
4294
4295 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
4296
4297 #define HEADER_IS_ZERO(match_criteria, headers)                              \
4298         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
4299                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
4300
4301 /**
4302  * Calculate flow matcher enable bitmap.
4303  *
4304  * @param match_criteria
4305  *   Pointer to flow matcher criteria.
4306  *
4307  * @return
4308  *   Bitmap of enabled fields.
4309  */
4310 static uint8_t
4311 flow_dv_matcher_enable(uint32_t *match_criteria)
4312 {
4313         uint8_t match_criteria_enable;
4314
4315         match_criteria_enable =
4316                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
4317                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
4318         match_criteria_enable |=
4319                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
4320                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
4321         match_criteria_enable |=
4322                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
4323                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
4324         match_criteria_enable |=
4325                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
4326                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
4327         match_criteria_enable |=
4328                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
4329                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
4330         return match_criteria_enable;
4331 }
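
/*
 * Worked example (sketch only): a matcher mask touching only the outer
 * headers and misc_parameters (e.g. outer IPv4 plus a VXLAN VNI)
 * yields
 *   match_criteria_enable =
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 *           (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT);
 * the remaining criteria bits stay clear because their header blocks
 * compare equal to matcher_zero.
 */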
4332
4334 /**
4335  * Get a flow table.
4336  *
4337  * @param[in, out] dev
4338  *   Pointer to rte_eth_dev structure.
4339  * @param[in] table_id
4340  *   Table id to use.
4341  * @param[in] egress
4342  *   Direction of the table.
4343  * @param[in] transfer
4344  *   E-Switch or NIC flow.
4345  * @param[out] error
4346  *   Pointer to error structure.
4347  *
4348  * @return
4349  *   Returns table resource based on the index, NULL in case of failure.
4350  */
4351 static struct mlx5_flow_tbl_resource *
4352 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
4353                          uint32_t table_id, uint8_t egress,
4354                          uint8_t transfer,
4355                          struct rte_flow_error *error)
4356 {
4357         struct mlx5_priv *priv = dev->data->dev_private;
4358         struct mlx5_ibv_shared *sh = priv->sh;
4359         struct mlx5_flow_tbl_resource *tbl;
4360
4361 #ifdef HAVE_MLX5DV_DR
4362         if (transfer) {
4363                 tbl = &sh->fdb_tbl[table_id];
4364                 if (!tbl->obj)
4365                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4366                                 (sh->fdb_domain, table_id);
4367         } else if (egress) {
4368                 tbl = &sh->tx_tbl[table_id];
4369                 if (!tbl->obj)
4370                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4371                                 (sh->tx_domain, table_id);
4372         } else {
4373                 tbl = &sh->rx_tbl[table_id];
4374                 if (!tbl->obj)
4375                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4376                                 (sh->rx_domain, table_id);
4377         }
4378         if (!tbl->obj) {
4379                 rte_flow_error_set(error, ENOMEM,
4380                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4381                                    NULL, "cannot create table");
4382                 return NULL;
4383         }
4384         rte_atomic32_inc(&tbl->refcnt);
4385         return tbl;
4386 #else
4387         (void)error;
4388         (void)tbl;
4389         if (transfer)
4390                 return &sh->fdb_tbl[table_id];
4391         else if (egress)
4392                 return &sh->tx_tbl[table_id];
4393         else
4394                 return &sh->rx_tbl[table_id];
4395 #endif
4396 }
4397
4398 /**
4399  * Release a flow table.
4400  *
4401  * @param[in] tbl
4402  *   Table resource to be released.
4403  *
4404  * @return
4405  *   Returns 0 if the table was released, 1 otherwise.
4406  */
4407 static int
4408 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
4409 {
4410         if (!tbl)
4411                 return 0;
4412         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
4413                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
4414                 tbl->obj = NULL;
4415                 return 0;
4416         }
4417         return 1;
4418 }
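
/*
 * Usage sketch (illustrative only, not compiled): the get/release pair
 * is reference counted, so a table obtained for e.g. a jump target must
 * be released exactly once per successful get. The helper name is an
 * assumption for the example.
 */
#if 0
static int
example_tbl_refcounting(struct rte_eth_dev *dev, struct rte_flow_error *err)
{
        struct mlx5_flow_tbl_resource *tbl;

        tbl = flow_dv_tbl_resource_get(dev, 1 * MLX5_GROUP_FACTOR,
                                       0 /* ingress */, 0 /* NIC domain */,
                                       err);
        if (!tbl)
                return -rte_errno;
        /* ... use tbl->obj with a matcher or a jump action ... */
        flow_dv_tbl_resource_release(tbl);
        return 0;
}
#endif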
4419
4420 /**
4421  * Register the flow matcher.
4422  *
4423  * @param[in, out] dev
4424  *   Pointer to rte_eth_dev structure.
4425  * @param[in, out] matcher
4426  *   Pointer to flow matcher.
4427  * @param[in, out] dev_flow
4428  *   Pointer to the dev_flow.
4429  * @param[out] error
4430  *   Pointer to error structure.
4431  *
4432  * @return
4433  *   0 on success, a negative errno value otherwise and rte_errno is set.
4434  */
4435 static int
4436 flow_dv_matcher_register(struct rte_eth_dev *dev,
4437                          struct mlx5_flow_dv_matcher *matcher,
4438                          struct mlx5_flow *dev_flow,
4439                          struct rte_flow_error *error)
4440 {
4441         struct mlx5_priv *priv = dev->data->dev_private;
4442         struct mlx5_ibv_shared *sh = priv->sh;
4443         struct mlx5_flow_dv_matcher *cache_matcher;
4444         struct mlx5dv_flow_matcher_attr dv_attr = {
4445                 .type = IBV_FLOW_ATTR_NORMAL,
4446                 .match_mask = (void *)&matcher->mask,
4447         };
4448         struct mlx5_flow_tbl_resource *tbl = NULL;
4449
4450         /* Lookup from cache. */
4451         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
4452                 if (matcher->crc == cache_matcher->crc &&
4453                     matcher->priority == cache_matcher->priority &&
4454                     matcher->egress == cache_matcher->egress &&
4455                     matcher->group == cache_matcher->group &&
4456                     matcher->transfer == cache_matcher->transfer &&
4457                     !memcmp((const void *)matcher->mask.buf,
4458                             (const void *)cache_matcher->mask.buf,
4459                             cache_matcher->mask.size)) {
4460                         DRV_LOG(DEBUG,
4461                                 "priority %hd use %s matcher %p: refcnt %d++",
4462                                 cache_matcher->priority,
4463                                 cache_matcher->egress ? "tx" : "rx",
4464                                 (void *)cache_matcher,
4465                                 rte_atomic32_read(&cache_matcher->refcnt));
4466                         rte_atomic32_inc(&cache_matcher->refcnt);
4467                         dev_flow->dv.matcher = cache_matcher;
4468                         return 0;
4469                 }
4470         }
4471         /* Register new matcher. */
4472         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
4473         if (!cache_matcher)
4474                 return rte_flow_error_set(error, ENOMEM,
4475                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4476                                           "cannot allocate matcher memory");
4477         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
4478                                        matcher->egress, matcher->transfer,
4479                                        error);
4480         if (!tbl) {
4481                 rte_free(cache_matcher);
4482                 return rte_flow_error_set(error, ENOMEM,
4483                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4484                                           NULL, "cannot create table");
4485         }
4486         *cache_matcher = *matcher;
4487         dv_attr.match_criteria_enable =
4488                 flow_dv_matcher_enable(cache_matcher->mask.buf);
4489         dv_attr.priority = matcher->priority;
4490         if (matcher->egress)
4491                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
4492         cache_matcher->matcher_object =
4493                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
4494         if (!cache_matcher->matcher_object) {
4495                 rte_free(cache_matcher);
4496 #ifdef HAVE_MLX5DV_DR
4497                 flow_dv_tbl_resource_release(tbl);
4498 #endif
4499                 return rte_flow_error_set(error, ENOMEM,
4500                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4501                                           NULL, "cannot create matcher");
4502         }
4503         rte_atomic32_inc(&cache_matcher->refcnt);
4504         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
4505         dev_flow->dv.matcher = cache_matcher;
4506         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
4507                 cache_matcher->priority,
4508                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
4509                 rte_atomic32_read(&cache_matcher->refcnt));
4510         rte_atomic32_inc(&tbl->refcnt);
4511         return 0;
4512 }
4513
4514 /**
4515  * Find existing tag resource or create and register a new one.
4516  *
4517  * @param[in, out] dev
4518  *   Pointer to rte_eth_dev structure.
4519  * @param[in, out] resource
4520  *   Pointer to tag resource.
4521  * @param[in, out] dev_flow
4522  *   Pointer to the dev_flow.
4523  * @param[out] error
4524  *   Pointer to error structure.
4525  *
4526  * @return
4527  *   0 on success, a negative errno value otherwise and rte_errno is set.
4528  */
4529 static int
4530 flow_dv_tag_resource_register
4531                         (struct rte_eth_dev *dev,
4532                          struct mlx5_flow_dv_tag_resource *resource,
4533                          struct mlx5_flow *dev_flow,
4534                          struct rte_flow_error *error)
4535 {
4536         struct mlx5_priv *priv = dev->data->dev_private;
4537         struct mlx5_ibv_shared *sh = priv->sh;
4538         struct mlx5_flow_dv_tag_resource *cache_resource;
4539
4540         /* Lookup a matching resource from cache. */
4541         LIST_FOREACH(cache_resource, &sh->tags, next) {
4542                 if (resource->tag == cache_resource->tag) {
4543                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
4544                                 (void *)cache_resource,
4545                                 rte_atomic32_read(&cache_resource->refcnt));
4546                         rte_atomic32_inc(&cache_resource->refcnt);
4547                         dev_flow->flow->tag_resource = cache_resource;
4548                         return 0;
4549                 }
4550         }
4551         /* Register new resource. */
4552         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
4553         if (!cache_resource)
4554                 return rte_flow_error_set(error, ENOMEM,
4555                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4556                                           "cannot allocate resource memory");
4557         *cache_resource = *resource;
4558         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
4559                 (resource->tag);
4560         if (!cache_resource->action) {
4561                 rte_free(cache_resource);
4562                 return rte_flow_error_set(error, ENOMEM,
4563                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4564                                           NULL, "cannot create action");
4565         }
4566         rte_atomic32_init(&cache_resource->refcnt);
4567         rte_atomic32_inc(&cache_resource->refcnt);
4568         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
4569         dev_flow->flow->tag_resource = cache_resource;
4570         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
4571                 (void *)cache_resource,
4572                 rte_atomic32_read(&cache_resource->refcnt));
4573         return 0;
4574 }
4575
4576 /**
4577  * Release the tag.
4578  *
4579  * @param dev
4580  *   Pointer to Ethernet device.
4581  * @param tag
4582  *   Pointer to the tag resource.
4583  *
4584  * @return
4585  *   1 while a reference on it exists, 0 when freed.
4586  */
4587 static int
4588 flow_dv_tag_release(struct rte_eth_dev *dev,
4589                     struct mlx5_flow_dv_tag_resource *tag)
4590 {
4591         assert(tag);
4592         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4593                 dev->data->port_id, (void *)tag,
4594                 rte_atomic32_read(&tag->refcnt));
4595         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4596                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4597                 LIST_REMOVE(tag, next);
4598                 DRV_LOG(DEBUG, "port %u tag %p: removed",
4599                         dev->data->port_id, (void *)tag);
4600                 rte_free(tag);
4601                 return 0;
4602         }
4603         return 1;
4604 }
4605
4606 /**
4607  * Translate port ID action to vport.
4608  *
4609  * @param[in] dev
4610  *   Pointer to rte_eth_dev structure.
4611  * @param[in] action
4612  *   Pointer to the port ID action.
4613  * @param[out] dst_port_id
4614  *   The target port ID.
4615  * @param[out] error
4616  *   Pointer to the error structure.
4617  *
4618  * @return
4619  *   0 on success, a negative errno value otherwise and rte_errno is set.
4620  */
4621 static int
4622 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4623                                  const struct rte_flow_action *action,
4624                                  uint32_t *dst_port_id,
4625                                  struct rte_flow_error *error)
4626 {
4627         uint32_t port;
4628         uint16_t port_id;
4629         int ret;
4630         const struct rte_flow_action_port_id *conf =
4631                         (const struct rte_flow_action_port_id *)action->conf;
4632
4633         port = conf->original ? dev->data->port_id : conf->id;
4634         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4635         if (ret)
4636                 return rte_flow_error_set(error, -ret,
4637                                           RTE_FLOW_ERROR_TYPE_ACTION,
4638                                           NULL,
4639                                           "No eswitch info was found for port");
4640         *dst_port_id = port_id;
4641         return 0;
4642 }
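
/*
 * Usage sketch (illustrative only, not compiled): resolving a PORT_ID
 * action that targets DPDK port 1 into the matching E-Switch vport id.
 * The helper name and the chosen port are assumptions for the example.
 */
#if 0
static int
example_resolve_port_id(struct rte_eth_dev *dev, struct rte_flow_error *err)
{
        static const struct rte_flow_action_port_id conf = {
                .original = 0, /* honor .id below, not the incoming port */
                .id = 1,
        };
        const struct rte_flow_action action = {
                .type = RTE_FLOW_ACTION_TYPE_PORT_ID,
                .conf = &conf,
        };
        uint32_t vport = 0;

        return flow_dv_translate_action_port_id(dev, &action, &vport, err);
}
#endif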
4643
4644 /**
4645  * Fill the flow with DV spec.
4646  *
4647  * @param[in] dev
4648  *   Pointer to rte_eth_dev structure.
4649  * @param[in, out] dev_flow
4650  *   Pointer to the sub flow.
4651  * @param[in] attr
4652  *   Pointer to the flow attributes.
4653  * @param[in] items
4654  *   Pointer to the list of items.
4655  * @param[in] actions
4656  *   Pointer to the list of actions.
4657  * @param[out] error
4658  *   Pointer to the error structure.
4659  *
4660  * @return
4661  *   0 on success, a negative errno value otherwise and rte_errno is set.
4662  */
4663 static int
4664 flow_dv_translate(struct rte_eth_dev *dev,
4665                   struct mlx5_flow *dev_flow,
4666                   const struct rte_flow_attr *attr,
4667                   const struct rte_flow_item items[],
4668                   const struct rte_flow_action actions[],
4669                   struct rte_flow_error *error)
4670 {
4671         struct mlx5_priv *priv = dev->data->dev_private;
4672         struct rte_flow *flow = dev_flow->flow;
4673         uint64_t item_flags = 0;
4674         uint64_t last_item = 0;
4675         uint64_t action_flags = 0;
4676         uint64_t priority = attr->priority;
4677         struct mlx5_flow_dv_matcher matcher = {
4678                 .mask = {
4679                         .size = sizeof(matcher.mask.buf),
4680                 },
4681         };
4682         int actions_n = 0;
4683         bool actions_end = false;
4684         struct mlx5_flow_dv_modify_hdr_resource res = {
4685                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4686                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4687         };
4688         union flow_dv_attr flow_attr = { .attr = 0 };
4689         struct mlx5_flow_dv_tag_resource tag_resource;
4690         uint32_t modify_action_position = UINT32_MAX;
4691         void *match_mask = matcher.mask.buf;
4692         void *match_value = dev_flow->dv.value.buf;
4693
4694         flow->group = attr->group;
4695         if (attr->transfer)
4696                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4697         if (priority == MLX5_FLOW_PRIO_RSVD)
4698                 priority = priv->config.flow_prio - 1;
4699         for (; !actions_end ; actions++) {
4700                 const struct rte_flow_action_queue *queue;
4701                 const struct rte_flow_action_rss *rss;
4702                 const struct rte_flow_action *action = actions;
4703                 const struct rte_flow_action_count *count = action->conf;
4704                 const uint8_t *rss_key;
4705                 const struct rte_flow_action_jump *jump_data;
4706                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4707                 struct mlx5_flow_tbl_resource *tbl;
4708                 uint32_t port_id = 0;
4709                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4710
4711                 switch (actions->type) {
4712                 case RTE_FLOW_ACTION_TYPE_VOID:
4713                         break;
4714                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4715                         if (flow_dv_translate_action_port_id(dev, action,
4716                                                              &port_id, error))
4717                                 return -rte_errno;
4718                         port_id_resource.port_id = port_id;
4719                         if (flow_dv_port_id_action_resource_register
4720                             (dev, &port_id_resource, dev_flow, error))
4721                                 return -rte_errno;
4722                         dev_flow->dv.actions[actions_n++] =
4723                                 dev_flow->dv.port_id_action->action;
4724                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4725                         break;
4726                 case RTE_FLOW_ACTION_TYPE_FLAG:
4727                         tag_resource.tag =
4728                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4729                         if (!flow->tag_resource)
4730                                 if (flow_dv_tag_resource_register
4731                                     (dev, &tag_resource, dev_flow, error))
4732                                         return -rte_errno;
4733                         dev_flow->dv.actions[actions_n++] =
4734                                 flow->tag_resource->action;
4735                         action_flags |= MLX5_FLOW_ACTION_FLAG;
4736                         break;
4737                 case RTE_FLOW_ACTION_TYPE_MARK:
4738                         tag_resource.tag = mlx5_flow_mark_set
4739                               (((const struct rte_flow_action_mark *)
4740                                (actions->conf))->id);
4741                         if (!flow->tag_resource)
4742                                 if (flow_dv_tag_resource_register
4743                                     (dev, &tag_resource, dev_flow, error))
4744                                         return -rte_errno;
4745                         dev_flow->dv.actions[actions_n++] =
4746                                 flow->tag_resource->action;
4747                         action_flags |= MLX5_FLOW_ACTION_MARK;
4748                         break;
4749                 case RTE_FLOW_ACTION_TYPE_DROP:
4750                         action_flags |= MLX5_FLOW_ACTION_DROP;
4751                         break;
4752                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4753                         queue = actions->conf;
4754                         flow->rss.queue_num = 1;
4755                         (*flow->queue)[0] = queue->index;
4756                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4757                         break;
4758                 case RTE_FLOW_ACTION_TYPE_RSS:
4759                         rss = actions->conf;
4760                         if (flow->queue)
4761                                 memcpy((*flow->queue), rss->queue,
4762                                        rss->queue_num * sizeof(uint16_t));
4763                         flow->rss.queue_num = rss->queue_num;
4764                         /* NULL RSS key indicates default RSS key. */
4765                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
4766                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4767                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4768                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4769                         flow->rss.level = rss->level;
4770                         action_flags |= MLX5_FLOW_ACTION_RSS;
4771                         break;
4772                 case RTE_FLOW_ACTION_TYPE_COUNT:
4773                         if (!priv->config.devx) {
4774                                 rte_errno = ENOTSUP;
4775                                 goto cnt_err;
4776                         }
4777                         flow->counter = flow_dv_counter_alloc(dev,
4778                                                               count->shared,
4779                                                               count->id,
4780                                                               attr->group);
4781                         if (flow->counter == NULL)
4782                                 goto cnt_err;
4783                         dev_flow->dv.actions[actions_n++] =
4784                                 flow->counter->action;
4785                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4786                         break;
4787 cnt_err:
4788                         if (rte_errno == ENOTSUP)
4789                                 return rte_flow_error_set
4790                                               (error, ENOTSUP,
4791                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4792                                                NULL,
4793                                                "count action not supported");
4794                         else
4795                                 return rte_flow_error_set
4796                                                 (error, rte_errno,
4797                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4798                                                  action,
4799                                                  "cannot create counter"
4800                                                   " object.");
4801                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4802                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4803                         if (flow_dv_create_action_l2_encap(dev, actions,
4804                                                            dev_flow,
4805                                                            attr->transfer,
4806                                                            error))
4807                                 return -rte_errno;
4808                         dev_flow->dv.actions[actions_n++] =
4809                                 dev_flow->dv.encap_decap->verbs_action;
4810                         action_flags |= actions->type ==
4811                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4812                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
4813                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
4814                         break;
4815                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4816                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4817                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
4818                                                            attr->transfer,
4819                                                            error))
4820                                 return -rte_errno;
4821                         dev_flow->dv.actions[actions_n++] =
4822                                 dev_flow->dv.encap_decap->verbs_action;
4823                         action_flags |= actions->type ==
4824                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4825                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
4826                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
4827                         break;
4828                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4829                         /* Handle encap with preceding decap. */
4830                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4831                                 if (flow_dv_create_action_raw_encap
4832                                         (dev, actions, dev_flow, attr, error))
4833                                         return -rte_errno;
4834                                 dev_flow->dv.actions[actions_n++] =
4835                                         dev_flow->dv.encap_decap->verbs_action;
4836                         } else {
4837                                 /* Handle encap without preceding decap. */
4838                                 if (flow_dv_create_action_l2_encap
4839                                     (dev, actions, dev_flow, attr->transfer,
4840                                      error))
4841                                         return -rte_errno;
4842                                 dev_flow->dv.actions[actions_n++] =
4843                                         dev_flow->dv.encap_decap->verbs_action;
4844                         }
4845                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4846                         break;
4847                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4848                         /* Check if this decap is followed by encap. */
4849                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4850                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4851                                action++) {
4852                         }
4853                         /* Handle decap only if it isn't followed by encap. */
4854                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4855                                 if (flow_dv_create_action_l2_decap
4856                                     (dev, dev_flow, attr->transfer, error))
4857                                         return -rte_errno;
4858                                 dev_flow->dv.actions[actions_n++] =
4859                                         dev_flow->dv.encap_decap->verbs_action;
4860                         }
4861                         /* If decap is followed by encap, handle it at encap. */
4862                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4863                         break;
4864                 case RTE_FLOW_ACTION_TYPE_JUMP:
4865                         jump_data = action->conf;
4866                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4867                                                        MLX5_GROUP_FACTOR,
4868                                                        attr->egress,
4869                                                        attr->transfer, error);
4870                         if (!tbl)
4871                                 return rte_flow_error_set
4872                                                 (error, errno,
4873                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4874                                                  NULL,
4875                                                  "cannot create jump action.");
4876                         jump_tbl_resource.tbl = tbl;
4877                         if (flow_dv_jump_tbl_resource_register
4878                             (dev, &jump_tbl_resource, dev_flow, error)) {
4879                                 flow_dv_tbl_resource_release(tbl);
4880                                 return rte_flow_error_set
4881                                                 (error, errno,
4882                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4883                                                  NULL,
4884                                                  "cannot create jump action.");
4885                         }
4886                         dev_flow->dv.actions[actions_n++] =
4887                                 dev_flow->dv.jump->action;
4888                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4889                         break;
4890                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4891                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4892                         if (flow_dv_convert_action_modify_mac(&res, actions,
4893                                                               error))
4894                                 return -rte_errno;
4895                         action_flags |= actions->type ==
4896                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4897                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
4898                                         MLX5_FLOW_ACTION_SET_MAC_DST;
4899                         break;
4900                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4901                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4902                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
4903                                                                error))
4904                                 return -rte_errno;
4905                         action_flags |= actions->type ==
4906                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4907                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
4908                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
4909                         break;
4910                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4911                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4912                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
4913                                                                error))
4914                                 return -rte_errno;
4915                         action_flags |= actions->type ==
4916                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4917                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
4918                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
4919                         break;
4920                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4921                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4922                         if (flow_dv_convert_action_modify_tp(&res, actions,
4923                                                              items, &flow_attr,
4924                                                              error))
4925                                 return -rte_errno;
4926                         action_flags |= actions->type ==
4927                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4928                                         MLX5_FLOW_ACTION_SET_TP_SRC :
4929                                         MLX5_FLOW_ACTION_SET_TP_DST;
4930                         break;
4931                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4932                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4933                                                                   &flow_attr,
4934                                                                   error))
4935                                 return -rte_errno;
4936                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4937                         break;
4938                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4939                         if (flow_dv_convert_action_modify_ttl(&res, actions,
4940                                                              items, &flow_attr,
4941                                                              error))
4942                                 return -rte_errno;
4943                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4944                         break;
4945                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4946                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4947                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4948                                                                   error))
4949                                 return -rte_errno;
4950                         action_flags |= actions->type ==
4951                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4952                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
4953                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4954                         break;
4956                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4957                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4958                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4959                                                                   error))
4960                                 return -rte_errno;
4961                         action_flags |= actions->type ==
4962                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4963                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
4964                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
4965                         break;
4966                 case RTE_FLOW_ACTION_TYPE_END:
4967                         actions_end = true;
4968                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4969                                 /* Create modify action if needed. */
4970                                 if (flow_dv_modify_hdr_resource_register
4971                                                                 (dev, &res,
4972                                                                  dev_flow,
4973                                                                  error))
4974                                         return -rte_errno;
4975                                 dev_flow->dv.actions[modify_action_position] =
4976                                         dev_flow->dv.modify_hdr->verbs_action;
4977                         }
4978                         break;
4979                 default:
4980                         break;
4981                 }
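                /*
                 * Editorial note: reserve one action slot at the first
                 * modify-header action seen; all such actions are merged
                 * into a single DV modify action that is written back into
                 * this slot when END is reached.
                 */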
4982                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4983                     modify_action_position == UINT32_MAX)
4984                         modify_action_position = actions_n++;
4985         }
4986         dev_flow->dv.actions_n = actions_n;
4987         flow->actions = action_flags;
4988         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4989                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4990
4991                 switch (items->type) {
4992                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4993                         flow_dv_translate_item_port_id(dev, match_mask,
4994                                                        match_value, items);
4995                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4996                         break;
4997                 case RTE_FLOW_ITEM_TYPE_ETH:
4998                         flow_dv_translate_item_eth(match_mask, match_value,
4999                                                    items, tunnel);
5000                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5001                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
5002                                              MLX5_FLOW_LAYER_OUTER_L2;
5003                         break;
5004                 case RTE_FLOW_ITEM_TYPE_VLAN:
5005                         flow_dv_translate_item_vlan(dev_flow,
5006                                                     match_mask, match_value,
5007                                                     items, tunnel);
5008                         matcher.priority = MLX5_PRIORITY_MAP_L2;
5009                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
5010                                               MLX5_FLOW_LAYER_INNER_VLAN) :
5011                                              (MLX5_FLOW_LAYER_OUTER_L2 |
5012                                               MLX5_FLOW_LAYER_OUTER_VLAN);
5013                         break;
5014                 case RTE_FLOW_ITEM_TYPE_IPV4:
5015                         flow_dv_translate_item_ipv4(match_mask, match_value,
5016                                                     items, tunnel, attr->group);
5017                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5018                         dev_flow->dv.hash_fields |=
5019                                 mlx5_flow_hashfields_adjust
5020                                         (dev_flow, tunnel,
5021                                          MLX5_IPV4_LAYER_TYPES,
5022                                          MLX5_IPV4_IBV_RX_HASH);
5023                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
5024                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
5025                         mlx5_flow_tunnel_ip_check(items, &last_item);
5026                         break;
5027                 case RTE_FLOW_ITEM_TYPE_IPV6:
5028                         flow_dv_translate_item_ipv6(match_mask, match_value,
5029                                                     items, tunnel, attr->group);
5030                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5031                         dev_flow->dv.hash_fields |=
5032                                 mlx5_flow_hashfields_adjust
5033                                         (dev_flow, tunnel,
5034                                          MLX5_IPV6_LAYER_TYPES,
5035                                          MLX5_IPV6_IBV_RX_HASH);
5036                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5037                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5038                         mlx5_flow_tunnel_ip_check(items, &last_item);
5039                         break;
5040                 case RTE_FLOW_ITEM_TYPE_TCP:
5041                         flow_dv_translate_item_tcp(match_mask, match_value,
5042                                                    items, tunnel);
5043                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5044                         dev_flow->dv.hash_fields |=
5045                                 mlx5_flow_hashfields_adjust
5046                                         (dev_flow, tunnel, ETH_RSS_TCP,
5047                                          IBV_RX_HASH_SRC_PORT_TCP |
5048                                          IBV_RX_HASH_DST_PORT_TCP);
5049                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5050                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5051                         break;
5052                 case RTE_FLOW_ITEM_TYPE_UDP:
5053                         flow_dv_translate_item_udp(match_mask, match_value,
5054                                                    items, tunnel);
5055                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5056                         dev_flow->dv.hash_fields |=
5057                                 mlx5_flow_hashfields_adjust
5058                                         (dev_flow, tunnel, ETH_RSS_UDP,
5059                                          IBV_RX_HASH_SRC_PORT_UDP |
5060                                          IBV_RX_HASH_DST_PORT_UDP);
5061                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5062                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5063                         break;
5064                 case RTE_FLOW_ITEM_TYPE_GRE:
5065                         flow_dv_translate_item_gre(match_mask, match_value,
5066                                                    items, tunnel);
5067                         last_item = MLX5_FLOW_LAYER_GRE;
5068                         break;
5069                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5070                         flow_dv_translate_item_gre_key(match_mask,
5071                                                        match_value, items);
5072                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5073                         break;
5074                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5075                         flow_dv_translate_item_nvgre(match_mask, match_value,
5076                                                      items, tunnel);
5077                         last_item = MLX5_FLOW_LAYER_GRE;
5078                         break;
5079                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5080                         flow_dv_translate_item_vxlan(match_mask, match_value,
5081                                                      items, tunnel);
5082                         last_item = MLX5_FLOW_LAYER_VXLAN;
5083                         break;
5084                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
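                        /*
                         * Editorial note: VXLAN-GPE reuses the plain VXLAN
                         * translator here; only the recorded layer flag
                         * differs.
                         */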
5085                         flow_dv_translate_item_vxlan(match_mask, match_value,
5086                                                      items, tunnel);
5087                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5088                         break;
5089                 case RTE_FLOW_ITEM_TYPE_MPLS:
5090                         flow_dv_translate_item_mpls(match_mask, match_value,
5091                                                     items, last_item, tunnel);
5092                         last_item = MLX5_FLOW_LAYER_MPLS;
5093                         break;
5094                 case RTE_FLOW_ITEM_TYPE_META:
5095                         flow_dv_translate_item_meta(match_mask, match_value,
5096                                                     items);
5097                         last_item = MLX5_FLOW_ITEM_METADATA;
5098                         break;
5099                 case RTE_FLOW_ITEM_TYPE_ICMP:
5100                         flow_dv_translate_item_icmp(match_mask, match_value,
5101                                                     items, tunnel);
5102                         last_item = MLX5_FLOW_LAYER_ICMP;
5103                         break;
5104                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5105                         flow_dv_translate_item_icmp6(match_mask, match_value,
5106                                                       items, tunnel);
5107                         last_item = MLX5_FLOW_LAYER_ICMP6;
5108                         break;
5109                 default:
5110                         break;
5111                 }
5112                 item_flags |= last_item;
5113         }
5114         /*
5115          * For ingress traffic when E-Switch mode is enabled, there are
5116          * two cases where the source port must be set manually: a NIC
5117          * steering rule, and an E-Switch rule where no port_id item was
5118          * found. In both cases the source port is set according to the
5119          * current port in use.
5120          */
5121         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
5122             (priv->representor || priv->master)) {
5123                 if (flow_dv_translate_item_port_id(dev, match_mask,
5124                                                    match_value, NULL))
5125                         return -rte_errno;
5126         }
5127         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
5128                                          dev_flow->dv.value.buf));
5129         dev_flow->layers = item_flags;
5130         /* Register matcher. */
5131         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
5132                                     matcher.mask.size);
5133         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
5134                                                      matcher.priority);
5135         matcher.egress = attr->egress;
5136         matcher.group = attr->group;
5137         matcher.transfer = attr->transfer;
5138         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
5139                 return -rte_errno;
5140         return 0;
5141 }
5142
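/*
 * Editorial sketch (not part of the original file): a minimal
 * application-level rule that exercises the translation path above. It
 * matches outer Ethernet/IPv4/UDP and steers to queue 0, so the items
 * loop fills the L2/L3/L4 matcher fields and the actions loop records a
 * fate action. All names below come from the public rte_flow API
 * (rte_flow.h); the driver code above is what turns them into DV objects.
 */
#if 0 /* illustration only */
static struct rte_flow *
example_create_udp_rule(uint16_t port_id, struct rte_flow_error *err)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action_queue queue = { .index = 0 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return rte_flow_create(port_id, &attr, pattern, actions, err);
}
#endif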
5143 /**
5144  * Apply the flow to the NIC.
5145  *
5146  * @param[in] dev
5147  *   Pointer to the Ethernet device structure.
5148  * @param[in, out] flow
5149  *   Pointer to flow structure.
5150  * @param[out] error
5151  *   Pointer to error structure.
5152  *
5153  * @return
5154  *   0 on success, a negative errno value otherwise and rte_errno is set.
5155  */
5156 static int
5157 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5158               struct rte_flow_error *error)
5159 {
5160         struct mlx5_flow_dv *dv;
5161         struct mlx5_flow *dev_flow;
5162         struct mlx5_priv *priv = dev->data->dev_private;
5163         int n;
5164         int err;
5165
5166         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5167                 dv = &dev_flow->dv;
5168                 n = dv->actions_n;
5169                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
5170                         if (flow->transfer) {
5171                                 dv->actions[n++] = priv->sh->esw_drop_action;
5172                         } else {
5173                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
5174                                 if (!dv->hrxq) {
5175                                         rte_flow_error_set
5176                                                 (error, errno,
5177                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5178                                                  NULL,
5179                                                  "cannot get drop hash queue");
5180                                         goto error;
5181                                 }
5182                                 dv->actions[n++] = dv->hrxq->action;
5183                         }
5184                 } else if (flow->actions &
5185                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
5186                         struct mlx5_hrxq *hrxq;
5187
5188                         hrxq = mlx5_hrxq_get(dev, flow->key,
5189                                              MLX5_RSS_HASH_KEY_LEN,
5190                                              dv->hash_fields,
5191                                              (*flow->queue),
5192                                              flow->rss.queue_num);
5193                         if (!hrxq) {
5194                                 hrxq = mlx5_hrxq_new
5195                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
5196                                          dv->hash_fields, (*flow->queue),
5197                                          flow->rss.queue_num,
5198                                          !!(dev_flow->layers &
5199                                             MLX5_FLOW_LAYER_TUNNEL));
5200                         }
5201                         if (!hrxq) {
5202                                 rte_flow_error_set
5203                                         (error, rte_errno,
5204                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5205                                          "cannot get hash queue");
5206                                 goto error;
5207                         }
5208                         dv->hrxq = hrxq;
5209                         dv->actions[n++] = dv->hrxq->action;
5210                 }
5211                 dv->flow =
5212                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
5213                                                   (void *)&dv->value, n,
5214                                                   dv->actions);
5215                 if (!dv->flow) {
5216                         rte_flow_error_set(error, errno,
5217                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5218                                            NULL,
5219                                            "hardware refuses to create flow");
5220                         goto error;
5221                 }
5222                 if (priv->vmwa_context &&
5223                     dev_flow->dv.vf_vlan.tag &&
5224                     !dev_flow->dv.vf_vlan.created) {
5225                         /*
5226                          * The rule contains the VLAN pattern.
5227                          * For VF we are going to create VLAN
5228                          * interface to make hypervisor set correct
5229                          * e-Switch vport context.
5230                          */
5231                         mlx5_vlan_vmwa_acquire(dev, &dev_flow->dv.vf_vlan);
5232                 }
5233         }
5234         return 0;
5235 error:
5236         err = rte_errno; /* Save rte_errno before cleanup. */
5237         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5238                 struct mlx5_flow_dv *dv = &dev_flow->dv;
5239                 if (dv->hrxq) {
5240                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
5241                                 mlx5_hrxq_drop_release(dev);
5242                         else
5243                                 mlx5_hrxq_release(dev, dv->hrxq);
5244                         dv->hrxq = NULL;
5245                 }
5246                 if (dev_flow->dv.vf_vlan.tag &&
5247                     dev_flow->dv.vf_vlan.created)
5248                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5249         }
5250         rte_errno = err; /* Restore rte_errno. */
5251         return -rte_errno;
5252 }
5253
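/*
 * Editorial note: flow_dv_apply() resolves the fate action late, at
 * apply time. For QUEUE/RSS it first tries mlx5_hrxq_get() to reuse a
 * cached hash Rx queue and only calls mlx5_hrxq_new() on a miss. On any
 * failure the error path walks every sub-flow and releases only what was
 * actually taken (dv->hrxq is reset to NULL), preserving the original
 * rte_errno across the cleanup.
 */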
5254 /**
5255  * Release the flow matcher.
5256  *
5257  * @param dev
5258  *   Pointer to Ethernet device.
5259  * @param flow
5260  *   Pointer to mlx5_flow.
5261  *
5262  * @return
5263  *   1 while a reference on it exists, 0 when freed.
5264  */
5265 static int
5266 flow_dv_matcher_release(struct rte_eth_dev *dev,
5267                         struct mlx5_flow *flow)
5268 {
5269         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
5270         struct mlx5_priv *priv = dev->data->dev_private;
5271         struct mlx5_ibv_shared *sh = priv->sh;
5272         struct mlx5_flow_tbl_resource *tbl;
5273
5274         assert(matcher->matcher_object);
5275         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
5276                 dev->data->port_id, (void *)matcher,
5277                 rte_atomic32_read(&matcher->refcnt));
5278         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
5279                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
5280                            (matcher->matcher_object));
5281                 LIST_REMOVE(matcher, next);
5282                 if (matcher->egress)
5283                         tbl = &sh->tx_tbl[matcher->group];
5284                 else
5285                         tbl = &sh->rx_tbl[matcher->group];
5286                 flow_dv_tbl_resource_release(tbl);
5287                 rte_free(matcher);
5288                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
5289                         dev->data->port_id, (void *)matcher);
5290                 return 0;
5291         }
5292         return 1;
5293 }
5294
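/*
 * Editorial sketch: every *_release() helper in this section follows the
 * same reference-counting shape, destroying the hardware object only when
 * the last flow referencing it goes away. "struct cache_obj" and both
 * helpers below are hypothetical, shown only to make the pattern
 * explicit; they rely on sys/queue.h and rte_atomic.h, both already
 * included by this file.
 */
#if 0 /* illustration only */
struct cache_obj {
        LIST_ENTRY(cache_obj) next; /* cache list linkage */
        rte_atomic32_t refcnt;      /* one count per referencing flow */
};

static void
cache_obj_hold(struct cache_obj *obj)
{
        rte_atomic32_inc(&obj->refcnt);
}

static int
cache_obj_put(struct cache_obj *obj)
{
        /* Tear down only on the 1 -> 0 transition. */
        if (rte_atomic32_dec_and_test(&obj->refcnt)) {
                LIST_REMOVE(obj, next);
                /* Destroy the associated HW object here. */
                rte_free(obj);
                return 0; /* freed */
        }
        return 1; /* still referenced */
}
#endif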
5295 /**
5296  * Release an encap/decap resource.
5297  *
5298  * @param flow
5299  *   Pointer to mlx5_flow.
5300  *
5301  * @return
5302  *   1 while a reference on it exists, 0 when freed.
5303  */
5304 static int
5305 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
5306 {
5307         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
5308                                                 flow->dv.encap_decap;
5309
5310         assert(cache_resource->verbs_action);
5311         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
5312                 (void *)cache_resource,
5313                 rte_atomic32_read(&cache_resource->refcnt));
5314         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5315                 claim_zero(mlx5_glue->destroy_flow_action
5316                                 (cache_resource->verbs_action));
5317                 LIST_REMOVE(cache_resource, next);
5318                 rte_free(cache_resource);
5319                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
5320                         (void *)cache_resource);
5321                 return 0;
5322         }
5323         return 1;
5324 }
5325
5326 /**
5327  * Release a jump-to-table action resource.
5328  *
5329  * @param flow
5330  *   Pointer to mlx5_flow.
5331  *
5332  * @return
5333  *   1 while a reference on it exists, 0 when freed.
5334  */
5335 static int
5336 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
5337 {
5338         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
5339                                                 flow->dv.jump;
5340
5341         assert(cache_resource->action);
5342         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
5343                 (void *)cache_resource,
5344                 rte_atomic32_read(&cache_resource->refcnt));
5345         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5346                 claim_zero(mlx5_glue->destroy_flow_action
5347                                 (cache_resource->action));
5348                 LIST_REMOVE(cache_resource, next);
5349                 flow_dv_tbl_resource_release(cache_resource->tbl);
5350                 rte_free(cache_resource);
5351                 DRV_LOG(DEBUG, "jump table resource %p: removed",
5352                         (void *)cache_resource);
5353                 return 0;
5354         }
5355         return 1;
5356 }
5357
5358 /**
5359  * Release a modify-header resource.
5360  *
5361  * @param flow
5362  *   Pointer to mlx5_flow.
5363  *
5364  * @return
5365  *   1 while a reference on it exists, 0 when freed.
5366  */
5367 static int
5368 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
5369 {
5370         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
5371                                                 flow->dv.modify_hdr;
5372
5373         assert(cache_resource->verbs_action);
5374         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
5375                 (void *)cache_resource,
5376                 rte_atomic32_read(&cache_resource->refcnt));
5377         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5378                 claim_zero(mlx5_glue->destroy_flow_action
5379                                 (cache_resource->verbs_action));
5380                 LIST_REMOVE(cache_resource, next);
5381                 rte_free(cache_resource);
5382                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
5383                         (void *)cache_resource);
5384                 return 0;
5385         }
5386         return 1;
5387 }
5388
5389 /**
5390  * Release a port ID action resource.
5391  *
5392  * @param flow
5393  *   Pointer to mlx5_flow.
5394  *
5395  * @return
5396  *   1 while a reference on it exists, 0 when freed.
5397  */
5398 static int
5399 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
5400 {
5401         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
5402                 flow->dv.port_id_action;
5403
5404         assert(cache_resource->action);
5405         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
5406                 (void *)cache_resource,
5407                 rte_atomic32_read(&cache_resource->refcnt));
5408         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5409                 claim_zero(mlx5_glue->destroy_flow_action
5410                                 (cache_resource->action));
5411                 LIST_REMOVE(cache_resource, next);
5412                 rte_free(cache_resource);
5413                 DRV_LOG(DEBUG, "port id action resource %p: removed",
5414                         (void *)cache_resource);
5415                 return 0;
5416         }
5417         return 1;
5418 }
5419
5420 /**
5421  * Remove the flow from the NIC but keep it in memory.
5422  *
5423  * @param[in] dev
5424  *   Pointer to Ethernet device.
5425  * @param[in, out] flow
5426  *   Pointer to flow structure.
5427  */
5428 static void
5429 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5430 {
5431         struct mlx5_flow_dv *dv;
5432         struct mlx5_flow *dev_flow;
5433
5434         if (!flow)
5435                 return;
5436         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5437                 dv = &dev_flow->dv;
5438                 if (dv->flow) {
5439                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
5440                         dv->flow = NULL;
5441                 }
5442                 if (dv->hrxq) {
5443                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
5444                                 mlx5_hrxq_drop_release(dev);
5445                         else
5446                                 mlx5_hrxq_release(dev, dv->hrxq);
5447                         dv->hrxq = NULL;
5448                 }
5449                 if (dev_flow->dv.vf_vlan.tag &&
5450                     dev_flow->dv.vf_vlan.created)
5451                         mlx5_vlan_vmwa_release(dev, &dev_flow->dv.vf_vlan);
5452         }
5453 }
5454
5455 /**
5456  * Remove the flow from the NIC and the memory.
5457  *
5458  * @param[in] dev
5459  *   Pointer to the Ethernet device structure.
5460  * @param[in, out] flow
5461  *   Pointer to flow structure.
5462  */
5463 static void
5464 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5465 {
5466         struct mlx5_flow *dev_flow;
5467
5468         if (!flow)
5469                 return;
5470         flow_dv_remove(dev, flow);
5471         if (flow->counter) {
5472                 flow_dv_counter_release(dev, flow->counter);
5473                 flow->counter = NULL;
5474         }
5475         if (flow->tag_resource) {
5476                 flow_dv_tag_release(dev, flow->tag_resource);
5477                 flow->tag_resource = NULL;
5478         }
5479         while (!LIST_EMPTY(&flow->dev_flows)) {
5480                 dev_flow = LIST_FIRST(&flow->dev_flows);
5481                 LIST_REMOVE(dev_flow, next);
5482                 if (dev_flow->dv.matcher)
5483                         flow_dv_matcher_release(dev, dev_flow);
5484                 if (dev_flow->dv.encap_decap)
5485                         flow_dv_encap_decap_resource_release(dev_flow);
5486                 if (dev_flow->dv.modify_hdr)
5487                         flow_dv_modify_hdr_resource_release(dev_flow);
5488                 if (dev_flow->dv.jump)
5489                         flow_dv_jump_tbl_resource_release(dev_flow);
5490                 if (dev_flow->dv.port_id_action)
5491                         flow_dv_port_id_action_resource_release(dev_flow);
5492                 rte_free(dev_flow);
5493         }
5494 }
5495
5496 /**
5497  * Query a DV flow rule for its statistics via DevX.
5498  *
5499  * @param[in] dev
5500  *   Pointer to Ethernet device.
5501  * @param[in] flow
5502  *   Pointer to the sub flow.
5503  * @param[out] data
5504  *   Data retrieved by the query.
5505  * @param[out] error
5506  *   Perform verbose error reporting if not NULL.
5507  *
5508  * @return
5509  *   0 on success, a negative errno value otherwise and rte_errno is set.
5510  */
5511 static int
5512 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
5513                     void *data, struct rte_flow_error *error)
5514 {
5515         struct mlx5_priv *priv = dev->data->dev_private;
5516         struct rte_flow_query_count *qc = data;
5517
5518         if (!priv->config.devx)
5519                 return rte_flow_error_set(error, ENOTSUP,
5520                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5521                                           NULL,
5522                                           "counters are not supported");
5523         if (flow->counter) {
5524                 uint64_t pkts, bytes;
5525                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
5526                                                &bytes);
5527
5528                 if (err)
5529                         return rte_flow_error_set(error, -err,
5530                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5531                                         NULL, "cannot read counters");
5532                 qc->hits_set = 1;
5533                 qc->bytes_set = 1;
5534                 qc->hits = pkts - flow->counter->hits;
5535                 qc->bytes = bytes - flow->counter->bytes;
5536                 if (qc->reset) {
5537                         flow->counter->hits = pkts;
5538                         flow->counter->bytes = bytes;
5539                 }
5540                 return 0;
5541         }
5542         return rte_flow_error_set(error, EINVAL,
5543                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5544                                   NULL,
5545                                   "counters are not available");
5546 }
5547
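/*
 * Editorial usage sketch: how an application would read the DevX counter
 * through the generic API served by the function above. "port_id" and
 * "flow" are assumed to come from an earlier rte_flow_create() that
 * included a COUNT action; <stdio.h> and <inttypes.h> are assumed for
 * the printout.
 */
#if 0 /* illustration only */
static void
example_read_counter(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action count_action = {
                .type = RTE_FLOW_ACTION_TYPE_COUNT,
        };
        struct rte_flow_error err;

        if (!rte_flow_query(port_id, flow, &count_action, &qc, &err) &&
            qc.hits_set && qc.bytes_set)
                printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
                       qc.hits, qc.bytes);
}
#endif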
5548 /**
5549  * Query a flow.
5550  *
5551  * @see rte_flow_query()
5552  * @see rte_flow_ops
5553  */
5554 static int
5555 flow_dv_query(struct rte_eth_dev *dev,
5556               struct rte_flow *flow,
5557               const struct rte_flow_action *actions,
5558               void *data,
5559               struct rte_flow_error *error)
5560 {
5561         int ret = -EINVAL;
5562
5563         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5564                 switch (actions->type) {
5565                 case RTE_FLOW_ACTION_TYPE_VOID:
5566                         break;
5567                 case RTE_FLOW_ACTION_TYPE_COUNT:
5568                         ret = flow_dv_query_count(dev, flow, data, error);
5569                         break;
5570                 default:
5571                         return rte_flow_error_set(error, ENOTSUP,
5572                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5573                                                   actions,
5574                                                   "action not supported");
5575                 }
5576         }
5577         return ret;
5578 }
5579
5580 /*
5581  * Mutex-protected thunk to flow_dv_translate().
5582  */
5583 static int
5584 flow_d_translate(struct rte_eth_dev *dev,
5585                  struct mlx5_flow *dev_flow,
5586                  const struct rte_flow_attr *attr,
5587                  const struct rte_flow_item items[],
5588                  const struct rte_flow_action actions[],
5589                  struct rte_flow_error *error)
5590 {
5591         int ret;
5592
5593         flow_d_shared_lock(dev);
5594         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
5595         flow_d_shared_unlock(dev);
5596         return ret;
5597 }
5598
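/*
 * Editorial note: the three thunks below repeat the same
 * lock/call/unlock shape. The shared lock serializes access to the
 * per-device resources (tables, matchers, cached actions) that sibling
 * ports sharing the same IB context may touch concurrently.
 */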
5599 /*
5600  * Mutex-protected thunk to flow_dv_apply().
5601  */
5602 static int
5603 flow_d_apply(struct rte_eth_dev *dev,
5604              struct rte_flow *flow,
5605              struct rte_flow_error *error)
5606 {
5607         int ret;
5608
5609         flow_d_shared_lock(dev);
5610         ret = flow_dv_apply(dev, flow, error);
5611         flow_d_shared_unlock(dev);
5612         return ret;
5613 }
5614
5615 /*
5616  * Mutex-protected thunk to flow_dv_remove().
5617  */
5618 static void
5619 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5620 {
5621         flow_d_shared_lock(dev);
5622         flow_dv_remove(dev, flow);
5623         flow_d_shared_unlock(dev);
5624 }
5625
5626 /*
5627  * Mutex-protected thunk to flow_dv_destroy().
5628  */
5629 static void
5630 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5631 {
5632         flow_d_shared_lock(dev);
5633         flow_dv_destroy(dev, flow);
5634         flow_d_shared_unlock(dev);
5635 }
5636
5637 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
5638         .validate = flow_dv_validate,
5639         .prepare = flow_dv_prepare,
5640         .translate = flow_d_translate,
5641         .apply = flow_d_apply,
5642         .remove = flow_d_remove,
5643         .destroy = flow_d_destroy,
5644         .query = flow_dv_query,
5645 };
5646
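/*
 * Editorial note: this ops table is the DV backend's entry point. The
 * generic mlx5 flow layer selects it (instead of the Verbs backend) when
 * the device supports DV flows, then drives every rule through
 * validate/prepare/translate/apply and, on teardown, remove/destroy.
 */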
5647 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */