net/mlx5: support LRO with single RxQ object
drivers/net/mlx5/mlx5_flow_dv.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

#ifndef HAVE_MLX5DV_DR_ESWITCH
#ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
#define MLX5DV_FLOW_TABLE_TYPE_FDB 0
#endif
#endif

#ifndef HAVE_MLX5DV_DR
#define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
#endif

union flow_dv_attr {
        struct {
                uint32_t valid:1;
                uint32_t ipv4:1;
                uint32_t ipv6:1;
                uint32_t tcp:1;
                uint32_t udp:1;
                uint32_t reserved:27;
        };
        uint32_t attr;
};

#define MLX5_FLOW_IPV4_LRO (1 << 0)
#define MLX5_FLOW_IPV6_LRO (1 << 1)

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        attr->ipv4 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        attr->ipv6 = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        attr->udp = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        attr->tcp = 1;
                        break;
                default:
                        break;
                }
        }
        attr->valid = 1;
}
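
/*
 * Illustrative sketch (editor's addition, compiled out): how
 * flow_dv_attr_init() classifies a typical ETH/IPV4/UDP pattern. The
 * example function below is hypothetical and not part of the driver.
 */
#if 0
static void
flow_dv_attr_init_example(void)
{
        static const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        union flow_dv_attr attr = { .attr = 0 };

        flow_dv_attr_init(pattern, &attr);
        /* Now attr.valid == 1, attr.ipv4 == 1 and attr.udp == 1. */
}
#endif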

struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0},
};

struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
        {4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
        {0, 0, 0},
};

static void
mlx5_flow_tunnel_ip_check(const struct rte_flow_item *item, uint64_t *flags)
{
        uint8_t next_protocol = 0xFF;

        if (item->mask != NULL) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        next_protocol =
                                ((const struct rte_flow_item_ipv4 *)
                                 (item->spec))->hdr.next_proto_id;
                        next_protocol &=
                                ((const struct rte_flow_item_ipv4 *)
                                 (item->mask))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        next_protocol =
                                ((const struct rte_flow_item_ipv6 *)
                                 (item->spec))->hdr.proto;
                        next_protocol &=
                                ((const struct rte_flow_item_ipv6 *)
                                 (item->mask))->hdr.proto;
                        break;
                default:
                        break;
                }
        }
        if (next_protocol == IPPROTO_IPIP)
                *flags |= MLX5_FLOW_LAYER_IPIP;
        if (next_protocol == IPPROTO_IPV6)
                *flags |= MLX5_FLOW_LAYER_IPV6_ENCAP;
}

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to the shared DV context. The lock occurs only if the context is
 * actually shared, i.e. we have a multiport IB device and
 * representors are created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_lock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;

        if (sh->dv_refcnt > 1) {
                int ret;

                ret = pthread_mutex_unlock(&sh->dv_mutex);
                assert(!ret);
                (void)ret;
        }
}
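
/*
 * Illustrative sketch (editor's addition, compiled out): the lock/unlock
 * pair is expected to bracket every mutating operation on the shared DV
 * context. The caller below is hypothetical.
 */
#if 0
static void
flow_d_shared_update_example(struct rte_eth_dev *dev)
{
        flow_d_shared_lock(dev);
        /* ... create or destroy objects hanging off priv->sh here ... */
        flow_d_shared_unlock(dev);
}
#endif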

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
                        actions[i].length = field->size ==
                                        4 ? 0 : field->size * 8;
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}
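
/*
 * Illustrative sketch (editor's addition, compiled out): feeding a fully
 * masked IPv4 source address through the converter produces a single
 * MLX5_MODI_OUT_SIPV4 command; a length of 0 denotes the whole 4-byte
 * field. The example function below is hypothetical.
 */
#if 0
static int
flow_dv_convert_modify_action_example(struct rte_flow_error *error)
{
        struct mlx5_flow_dv_modify_hdr_resource resource = {
                .actions_num = 0,
        };
        struct rte_flow_item_ipv4 ipv4 = {
                .hdr.src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
        };
        struct rte_flow_item_ipv4 ipv4_mask = {
                .hdr.src_addr = RTE_BE32(UINT32_MAX),
        };
        struct rte_flow_item item = {
                .type = RTE_FLOW_ITEM_TYPE_IPV4,
                .spec = &ipv4,
                .mask = &ipv4_mask,
        };

        /* On success resource.actions_num == 1: one SET command. */
        return flow_dv_convert_modify_action(&item, modify_ipv4, &resource,
                                             MLX5_MODIFICATION_TYPE_SET,
                                             error);
}
#endif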

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv4 *conf =
                (const struct rte_flow_action_set_ipv4 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;

        memset(&ipv4, 0, sizeof(ipv4));
        memset(&ipv4_mask, 0, sizeof(ipv4_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
                ipv4.hdr.src_addr = conf->ipv4_addr;
                ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
        } else {
                ipv4.hdr.dst_addr = conf->ipv4_addr;
                ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
        }
        item.spec = &ipv4;
        item.mask = &ipv4_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ipv6 *conf =
                (const struct rte_flow_action_set_ipv6 *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;

        memset(&ipv6, 0, sizeof(ipv6));
        memset(&ipv6_mask, 0, sizeof(ipv6_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
                memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.src_addr));
                memcpy(&ipv6_mask.hdr.src_addr,
                       &rte_flow_item_ipv6_mask.hdr.src_addr,
                       sizeof(ipv6.hdr.src_addr));
        } else {
                memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
                       sizeof(ipv6.hdr.dst_addr));
                memcpy(&ipv6_mask.hdr.dst_addr,
                       &rte_flow_item_ipv6_mask.hdr.dst_addr,
                       sizeof(ipv6.hdr.dst_addr));
        }
        item.spec = &ipv6;
        item.mask = &ipv6_mask;
        return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_mac *conf =
                (const struct rte_flow_action_set_mac *)(action->conf);
        struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
        struct rte_flow_item_eth eth;
        struct rte_flow_item_eth eth_mask;

        memset(&eth, 0, sizeof(eth));
        memset(&eth_mask, 0, sizeof(eth_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
                memcpy(&eth.src.addr_bytes, &conf->mac_addr,
                       sizeof(eth.src.addr_bytes));
                memcpy(&eth_mask.src.addr_bytes,
                       &rte_flow_item_eth_mask.src.addr_bytes,
                       sizeof(eth_mask.src.addr_bytes));
        } else {
                memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
                       sizeof(eth.dst.addr_bytes));
                memcpy(&eth_mask.dst.addr_bytes,
                       &rte_flow_item_eth_mask.dst.addr_bytes,
                       sizeof(eth_mask.dst.addr_bytes));
        }
        item.spec = &eth;
        item.mask = &eth_mask;
        return flow_dv_convert_modify_action(&item, modify_eth, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_tp *conf =
                (const struct rte_flow_action_set_tp *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_udp udp;
        struct rte_flow_item_udp udp_mask;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->udp) {
                memset(&udp, 0, sizeof(udp));
                memset(&udp_mask, 0, sizeof(udp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        udp.hdr.src_port = conf->port;
                        udp_mask.hdr.src_port =
                                        rte_flow_item_udp_mask.hdr.src_port;
                } else {
                        udp.hdr.dst_port = conf->port;
                        udp_mask.hdr.dst_port =
                                        rte_flow_item_udp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_UDP;
                item.spec = &udp;
                item.mask = &udp_mask;
                field = modify_udp;
        }
        if (attr->tcp) {
                memset(&tcp, 0, sizeof(tcp));
                memset(&tcp_mask, 0, sizeof(tcp_mask));
                if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
                        tcp.hdr.src_port = conf->port;
                        tcp_mask.hdr.src_port =
                                        rte_flow_item_tcp_mask.hdr.src_port;
                } else {
                        tcp.hdr.dst_port = conf->port;
                        tcp_mask.hdr.dst_port =
                                        rte_flow_item_tcp_mask.hdr.dst_port;
                }
                item.type = RTE_FLOW_ITEM_TYPE_TCP;
                item.spec = &tcp;
                item.mask = &tcp_mask;
                field = modify_tcp;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        const struct rte_flow_action_set_ttl *conf =
                (const struct rte_flow_action_set_ttl *)(action->conf);
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = conf->ttl_value;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = conf->ttl_value;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_item *items,
                         union flow_dv_attr *attr,
                         struct rte_flow_error *error)
{
        struct rte_flow_item item;
        struct rte_flow_item_ipv4 ipv4;
        struct rte_flow_item_ipv4 ipv4_mask;
        struct rte_flow_item_ipv6 ipv6;
        struct rte_flow_item_ipv6 ipv6_mask;
        struct field_modify_info *field;

        if (!attr->valid)
                flow_dv_attr_init(items, attr);
        if (attr->ipv4) {
                memset(&ipv4, 0, sizeof(ipv4));
                memset(&ipv4_mask, 0, sizeof(ipv4_mask));
                ipv4.hdr.time_to_live = 0xFF;
                ipv4_mask.hdr.time_to_live = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV4;
                item.spec = &ipv4;
                item.mask = &ipv4_mask;
                field = modify_ipv4;
        }
        if (attr->ipv6) {
                memset(&ipv6, 0, sizeof(ipv6));
                memset(&ipv6_mask, 0, sizeof(ipv6_mask));
                ipv6.hdr.hop_limits = 0xFF;
                ipv6_mask.hdr.hop_limits = 0xFF;
                item.type = RTE_FLOW_ITEM_TYPE_IPV6;
                item.spec = &ipv6;
                item.mask = &ipv6_mask;
                field = modify_ipv6;
        }
        return flow_dv_convert_modify_action(&item, field, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Convert modify-header increment/decrement TCP Sequence number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_seq
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}
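
/*
 * Illustrative sketch (editor's addition, compiled out): why multiplying
 * by UINT32_MAX implements decrement. Modulo 2^32, UINT32_MAX == -1, so
 * adding X * UINT32_MAX is the same as subtracting X.
 */
#if 0
static void
flow_dv_dec_tcp_seq_example(void)
{
        uint32_t seq = 1000;
        uint64_t value = 3;     /* decrement by 3 */

        value *= UINT32_MAX;    /* 3 * (2^32 - 1) */
        seq += (uint32_t)value; /* (uint32_t)value == 0xFFFFFFFD == -3 */
        /* seq is now 997: each added UINT32_MAX decrements by one. */
        (void)seq;
}
#endif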

/**
 * Convert modify-header increment/decrement TCP Acknowledgment number
 * to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tcp_ack
                        (struct mlx5_flow_dv_modify_hdr_resource *resource,
                         const struct rte_flow_action *action,
                         struct rte_flow_error *error)
{
        const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
        uint64_t value = rte_be_to_cpu_32(*conf);
        struct rte_flow_item item;
        struct rte_flow_item_tcp tcp;
        struct rte_flow_item_tcp tcp_mask;

        memset(&tcp, 0, sizeof(tcp));
        memset(&tcp_mask, 0, sizeof(tcp_mask));
        if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
                /*
                 * The HW has no decrement operation, only increment operation.
                 * To simulate decrement X from Y using increment operation
                 * we need to add UINT32_MAX X times to Y.
                 * Each adding of UINT32_MAX decrements Y by 1.
                 */
                value *= UINT32_MAX;
        tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
        tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
        item.type = RTE_FLOW_ITEM_TYPE_TCP;
        item.spec = &tcp;
        item.mask = &tcp_mask;
        return flow_dv_convert_modify_action(&item, modify_tcp, resource,
                                             MLX5_MODIFICATION_TYPE_ADD, error);
}

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}
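
/*
 * Illustrative sketch (editor's addition, compiled out): a META match
 * passes validation only on egress and only when
 * DEV_TX_OFFLOAD_MATCH_METADATA is enabled in txmode.offloads. The
 * names below are hypothetical.
 */
#if 0
static const struct rte_flow_attr meta_attr_example = { .egress = 1 };
static const struct rte_flow_item_meta meta_spec_example = {
        .data = RTE_BE32(0xcafe), /* must be non-zero */
};
static const struct rte_flow_item meta_pattern_example[] = {
        { .type = RTE_FLOW_ITEM_TYPE_META,
          .spec = &meta_spec_example,
          .mask = &rte_flow_item_meta_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif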

/**
 * Validate vport item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[in] item_flags
 *   Bit-fields that holds the items detected until now.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
                              const struct rte_flow_item *item,
                              const struct rte_flow_attr *attr,
                              uint64_t item_flags,
                              struct rte_flow_error *error)
{
        const struct rte_flow_item_port_id *spec = item->spec;
        const struct rte_flow_item_port_id *mask = item->mask;
        const struct rte_flow_item_port_id switch_mask = {
                        .id = 0xffffffff,
        };
        uint16_t esw_domain_id;
        uint16_t item_port_esw_domain_id;
        int ret;

        if (!attr->transfer)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on port id is valid only"
                                          " when transfer flag is enabled");
        if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ITEM, item,
                                          "multiple source ports are not"
                                          " supported");
        if (!mask)
                mask = &switch_mask;
        if (mask->id != 0xffffffff)
                return rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM_MASK,
                                           mask,
                                           "no support for partial mask on"
                                           " \"id\" field");
        ret = mlx5_flow_item_acceptable
                                (item, (const uint8_t *)mask,
                                 (const uint8_t *)&rte_flow_item_port_id_mask,
                                 sizeof(struct rte_flow_item_port_id),
                                 error);
        if (ret)
                return ret;
        if (!spec)
                return 0;
        ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
                                        NULL);
        if (ret)
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "failed to obtain E-Switch info for"
                                          " port");
        ret = mlx5_port_to_eswitch_info(dev->data->port_id,
                                        &esw_domain_id, NULL);
        if (ret < 0)
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "failed to obtain E-Switch info");
        if (item_port_esw_domain_id != esw_domain_id)
                return rte_flow_error_set(error, -ret,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
                                          "cannot match on a port from a"
                                          " different E-Switch");
        return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        if (!priv->config.devx)
                goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (!attr->transfer && attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap"
                                          " action in a flow");
        /* encap without preceding decap is not supported for ingress */
        if (!attr->transfer && attr->ingress &&
            !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " decap action");
        if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single decap"
                                          " action in a flow");
        if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have decap action after"
                                          " modify action");
        /* decap action is valid on egress only if it is followed by encap */
        if (attr->egress) {
                for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
                       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
                       action++) {
                }
                if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                         NULL, "decap action not supported"
                                         " for egress");
        }
        return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;
        struct rte_flow *flow = dev_flow->flow;
        struct mlx5dv_dr_domain *domain;

        resource->flags = flow->group ? 0 : 1;
        if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
                domain = sh->fdb_domain;
        else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
                domain = sh->rx_domain;
        else
                domain = sh->tx_domain;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->flags == cache_resource->flags &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (sh->ctx, cache_resource->reformat_type,
                         cache_resource->ft_type, domain, cache_resource->flags,
                         cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL));
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}
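
/*
 * Illustrative sketch (editor's addition, compiled out): typical use of
 * the resource cache. Two dev_flows carrying identical reformat data end
 * up sharing one verbs action; the second register call only bumps the
 * reference count. The caller below is hypothetical.
 */
#if 0
static void
encap_decap_cache_example(struct rte_eth_dev *dev,
                          struct mlx5_flow_dv_encap_decap_resource *res,
                          struct mlx5_flow *flow_a, struct mlx5_flow *flow_b,
                          struct rte_flow_error *error)
{
        (void)flow_dv_encap_decap_resource_register(dev, res, flow_a, error);
        (void)flow_dv_encap_decap_resource_register(dev, res, flow_b, error);
        /* flow_a->dv.encap_decap == flow_b->dv.encap_decap, refcnt == 2. */
}
#endif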

/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_jump_tbl_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
                if (resource->tbl == cache_resource->tbl) {
                        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.jump = cache_resource;
                        return 0;
                }
        }
        /* Register new jump table resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_flow_tbl
                (resource->tbl->obj);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
        dev_flow->dv.jump = cache_resource;
        DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Find existing table port ID resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise -errno and errno is set.
 */
static int
flow_dv_port_id_action_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_port_id_action_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_port_id_action_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
                if (resource->port_id == cache_resource->port_id) {
                        DRV_LOG(DEBUG, "port id action resource %p: "
                                "refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.port_id_action = cache_resource;
                        return 0;
                }
        }
        /* Register new port id action resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action =
                mlx5_glue->dr_create_flow_action_dest_vport
                        (priv->sh->fdb_domain, resource->port_id);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
        dev_flow->dv.port_id_action = cache_resource;
        DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the matching item structure, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_flow_item_eth);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_flow_item_vlan);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_flow_item_ipv4);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_flow_item_ipv6);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_flow_item_udp);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_flow_item_tcp);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                retval = sizeof(struct rte_flow_item_vxlan);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                retval = sizeof(struct rte_flow_item_gre);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_flow_item_nvgre);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_flow_item_vxlan_gpe);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_flow_item_mpls);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}
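
/*
 * Illustrative sketch (editor's addition, compiled out): the flow item
 * structures mirror the wire headers, so summing the per-item lengths of
 * an ETH/IPV4/UDP/VXLAN stack yields the encap header size,
 * 14 + 20 + 8 + 8 = 50 bytes. The function below is hypothetical.
 */
#if 0
static size_t
vxlan_encap_len_example(void)
{
        return flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_ETH) +
               flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_IPV4) +
               flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_UDP) +
               flow_dv_get_item_len(RTE_FLOW_ITEM_TYPE_VXLAN);
}
#endif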

#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04

/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct rte_ether_hdr *eth = NULL;
        struct rte_vlan_hdr *vlan = NULL;
        struct rte_ipv4_hdr *ipv4 = NULL;
        struct rte_ipv6_hdr *ipv6 = NULL;
        struct rte_udp_hdr *udp = NULL;
        struct rte_vxlan_hdr *vxlan = NULL;
        struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct rte_gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct rte_ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct rte_vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
1396                         if (!ipv4->time_to_live)
1397                                 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
1398                         break;
1399                 case RTE_FLOW_ITEM_TYPE_IPV6:
1400                         ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
1401                         if (!vlan && !eth)
1402                                 return rte_flow_error_set(error, EINVAL,
1403                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1404                                                 (void *)items->type,
1405                                                 "neither eth nor vlan"
1406                                                 " header found");
1407                         if (vlan && !vlan->eth_proto)
1408                                 vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1409                         else if (eth && !eth->ether_type)
1410                                 eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
1411                         if (!ipv6->vtc_flow)
1412                                 ipv6->vtc_flow =
1413                                         RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
1414                         if (!ipv6->hop_limits)
1415                                 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
1416                         break;
1417                 case RTE_FLOW_ITEM_TYPE_UDP:
1418                         udp = (struct rte_udp_hdr *)&buf[temp_size];
1419                         if (!ipv4 && !ipv6)
1420                                 return rte_flow_error_set(error, EINVAL,
1421                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1422                                                 (void *)items->type,
1423                                                 "ip header not found");
1424                         if (ipv4 && !ipv4->next_proto_id)
1425                                 ipv4->next_proto_id = IPPROTO_UDP;
1426                         else if (ipv6 && !ipv6->proto)
1427                                 ipv6->proto = IPPROTO_UDP;
1428                         break;
1429                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1430                         vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
1431                         if (!udp)
1432                                 return rte_flow_error_set(error, EINVAL,
1433                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1434                                                 (void *)items->type,
1435                                                 "udp header not found");
1436                         if (!udp->dst_port)
1437                                 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
1438                         if (!vxlan->vx_flags)
1439                                 vxlan->vx_flags =
1440                                         RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
1441                         break;
1442                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1443                         vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
1444                         if (!udp)
1445                                 return rte_flow_error_set(error, EINVAL,
1446                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1447                                                 (void *)items->type,
1448                                                 "udp header not found");
1449                         if (!vxlan_gpe->proto)
1450                                 return rte_flow_error_set(error, EINVAL,
1451                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1452                                                 (void *)items->type,
1453                                                 "next protocol not found");
1454                         if (!udp->dst_port)
1455                                 udp->dst_port =
1456                                         RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
1457                         if (!vxlan_gpe->vx_flags)
1458                                 vxlan_gpe->vx_flags =
1459                                                 MLX5_ENCAP_VXLAN_GPE_FLAGS;
1460                         break;
1461                 case RTE_FLOW_ITEM_TYPE_GRE:
1462                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1463                         gre = (struct rte_gre_hdr *)&buf[temp_size];
1464                         if (!gre->proto)
1465                                 return rte_flow_error_set(error, EINVAL,
1466                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1467                                                 (void *)items->type,
1468                                                 "next protocol not found");
1469                         if (!ipv4 && !ipv6)
1470                                 return rte_flow_error_set(error, EINVAL,
1471                                                 RTE_FLOW_ERROR_TYPE_ACTION,
1472                                                 (void *)items->type,
1473                                                 "ip header not found");
1474                         if (ipv4 && !ipv4->next_proto_id)
1475                                 ipv4->next_proto_id = IPPROTO_GRE;
1476                         else if (ipv6 && !ipv6->proto)
1477                                 ipv6->proto = IPPROTO_GRE;
1478                         break;
1479                 case RTE_FLOW_ITEM_TYPE_VOID:
1480                         break;
1481                 default:
1482                         return rte_flow_error_set(error, EINVAL,
1483                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1484                                                   (void *)items->type,
1485                                                   "unsupported item type");
1486                         break;
1487                 }
1488                 temp_size += len;
1489         }
1490         *size = temp_size;
1491         return 0;
1492 }
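
/*
 * For example, a definition list whose IPv4/UDP/VXLAN specs leave the TTL,
 * UDP destination port and VXLAN flags zeroed comes out of the conversion
 * above with time_to_live = MLX5_ENCAP_IPV4_TTL_DEF, dst_port =
 * RTE_BE16(MLX5_UDP_PORT_VXLAN) (4789) and vx_flags =
 * RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS) - valid defaults for a VXLAN tunnel.
 */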
1493
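/**
 * Zero the UDP checksum of an IPv6/UDP encapsulation header.
 *
 * Hardware recalculates the IPv4 checksum, so IPv4 headers are left as-is;
 * for IPv6, a zero UDP checksum is acceptable on tunnel packets (RFC 6935).
 *
 * @param[in, out] data
 *   Pointer to the raw encapsulation header buffer.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */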
1494 static int
1495 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1496 {
1497         struct rte_ether_hdr *eth = NULL;
1498         struct rte_vlan_hdr *vlan = NULL;
1499         struct rte_ipv6_hdr *ipv6 = NULL;
1500         struct rte_udp_hdr *udp = NULL;
1501         char *next_hdr;
1502         uint16_t proto;
1503
1504         eth = (struct rte_ether_hdr *)data;
1505         next_hdr = (char *)(eth + 1);
1506         proto = RTE_BE16(eth->ether_type);
1507
1508         /* VLAN skipping */
1509         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1510                 vlan = (struct rte_vlan_hdr *)next_hdr;
1511                 next_hdr += sizeof(struct rte_vlan_hdr);
1512                 proto = RTE_BE16(vlan->eth_proto);
1513         }
1514
1515         /* HW calculates the IPv4 checksum; no need to proceed. */
1516         if (proto == RTE_ETHER_TYPE_IPV4)
1517                 return 0;
1518
1519         /* Non IPv4/IPv6 header; not supported. */
1520         if (proto != RTE_ETHER_TYPE_IPV6) {
1521                 return rte_flow_error_set(error, ENOTSUP,
1522                                           RTE_FLOW_ERROR_TYPE_ACTION,
1523                                           NULL, "Cannot offload non IPv4/IPv6");
1524         }
1525
1526         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1527
1528         /* Ignore non-UDP packets. */
1529         if (ipv6->proto != IPPROTO_UDP)
1530                 return 0;
1531
1532         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1533         udp->dgram_cksum = 0;
1534
1535         return 0;
1536 }
1537
1538 /**
1539  * Convert L2 encap action to DV specification.
1540  *
1541  * @param[in] dev
1542  *   Pointer to rte_eth_dev structure.
1543  * @param[in] action
1544  *   Pointer to action structure.
1545  * @param[in, out] dev_flow
1546  *   Pointer to the mlx5_flow.
1547  * @param[in] transfer
1548  *   Mark if the flow is E-Switch flow.
1549  * @param[out] error
1550  *   Pointer to the error structure.
1551  *
1552  * @return
1553  *   0 on success, a negative errno value otherwise and rte_errno is set.
1554  */
1555 static int
1556 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1557                                const struct rte_flow_action *action,
1558                                struct mlx5_flow *dev_flow,
1559                                uint8_t transfer,
1560                                struct rte_flow_error *error)
1561 {
1562         const struct rte_flow_item *encap_data;
1563         const struct rte_flow_action_raw_encap *raw_encap_data;
1564         struct mlx5_flow_dv_encap_decap_resource res = {
1565                 .reformat_type =
1566                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1567                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1568                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1569         };
1570
1571         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1572                 raw_encap_data =
1573                         (const struct rte_flow_action_raw_encap *)action->conf;
1574                 res.size = raw_encap_data->size;
1575                 memcpy(res.buf, raw_encap_data->data, res.size);
1576                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1577                         return -rte_errno;
1578         } else {
1579                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1580                         encap_data =
1581                                 ((const struct rte_flow_action_vxlan_encap *)
1582                                                 action->conf)->definition;
1583                 else
1584                         encap_data =
1585                                 ((const struct rte_flow_action_nvgre_encap *)
1586                                                 action->conf)->definition;
1587                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1588                                                &res.size, error))
1589                         return -rte_errno;
1590         }
1591         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1592                 return rte_flow_error_set(error, EINVAL,
1593                                           RTE_FLOW_ERROR_TYPE_ACTION,
1594                                           NULL, "can't create L2 encap action");
1595         return 0;
1596 }
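
/*
 * Usage sketch (hypothetical application-side setup, not part of the
 * driver): a VXLAN_ENCAP action whose item list is converted to a raw
 * header by flow_dv_convert_encap_data(); zeroed fields get the defaults.
 *
 *	struct rte_flow_item_eth ex_eth = { .type = 0 };
 *	struct rte_flow_item_ipv4 ex_ipv4 = {
 *		.hdr = { .src_addr = RTE_BE32(0x01010101),
 *			 .dst_addr = RTE_BE32(0x02020202) },
 *	};
 *	struct rte_flow_item_udp ex_udp = { .hdr = { .dst_port = 0 } };
 *	struct rte_flow_item_vxlan ex_vxlan = { .vni = { 0, 0, 1 } };
 *	struct rte_flow_item ex_def[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &ex_eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ex_ipv4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &ex_udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &ex_vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_vxlan_encap ex_conf = { .definition = ex_def };
 */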
1597
1598 /**
1599  * Convert L2 decap action to DV specification.
1600  *
1601  * @param[in] dev
1602  *   Pointer to rte_eth_dev structure.
1603  * @param[in, out] dev_flow
1604  *   Pointer to the mlx5_flow.
1605  * @param[in] transfer
1606  *   Mark if the flow is E-Switch flow.
1607  * @param[out] error
1608  *   Pointer to the error structure.
1609  *
1610  * @return
1611  *   0 on success, a negative errno value otherwise and rte_errno is set.
1612  */
1613 static int
1614 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1615                                struct mlx5_flow *dev_flow,
1616                                uint8_t transfer,
1617                                struct rte_flow_error *error)
1618 {
1619         struct mlx5_flow_dv_encap_decap_resource res = {
1620                 .size = 0,
1621                 .reformat_type =
1622                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1623                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1624                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1625         };
1626
1627         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1628                 return rte_flow_error_set(error, EINVAL,
1629                                           RTE_FLOW_ERROR_TYPE_ACTION,
1630                                           NULL, "can't create L2 decap action");
1631         return 0;
1632 }
1633
1634 /**
1635  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1636  *
1637  * @param[in] dev
1638  *   Pointer to rte_eth_dev structure.
1639  * @param[in] action
1640  *   Pointer to action structure.
1641  * @param[in, out] dev_flow
1642  *   Pointer to the mlx5_flow.
1643  * @param[in] attr
1644  *   Pointer to the flow attributes.
1645  * @param[out] error
1646  *   Pointer to the error structure.
1647  *
1648  * @return
1649  *   0 on success, a negative errno value otherwise and rte_errno is set.
1650  */
1651 static int
1652 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1653                                 const struct rte_flow_action *action,
1654                                 struct mlx5_flow *dev_flow,
1655                                 const struct rte_flow_attr *attr,
1656                                 struct rte_flow_error *error)
1657 {
1658         const struct rte_flow_action_raw_encap *encap_data;
1659         struct mlx5_flow_dv_encap_decap_resource res;
1660
1661         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1662         res.size = encap_data->size;
1663         memcpy(res.buf, encap_data->data, res.size);
1664         res.reformat_type = attr->egress ?
1665                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1666                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1667         if (attr->transfer)
1668                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1669         else
1670                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1671                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1672         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1673                 return rte_flow_error_set(error, EINVAL,
1674                                           RTE_FLOW_ERROR_TYPE_ACTION,
1675                                           NULL, "can't create encap action");
1676         return 0;
1677 }
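
/*
 * Usage sketch (hypothetical): a RAW_ENCAP action carrying a prebuilt
 * outer header stack, filled by the caller; on an egress flow the reformat
 * type above resolves to L2_TO_L3_TUNNEL.
 *
 *	uint8_t ex_hdr[sizeof(struct rte_ether_hdr) +
 *		       sizeof(struct rte_ipv4_hdr) +
 *		       sizeof(struct rte_gre_hdr)];
 *	struct rte_flow_action_raw_encap ex_raw = {
 *		.data = ex_hdr,
 *		.size = sizeof(ex_hdr),
 *	};
 */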
1678
1679 /**
1680  * Validate the modify-header actions.
1681  *
1682  * @param[in] action_flags
1683  *   Holds the actions detected until now.
1684  * @param[in] action
1685  *   Pointer to the modify action.
1686  * @param[out] error
1687  *   Pointer to error structure.
1688  *
1689  * @return
1690  *   0 on success, a negative errno value otherwise and rte_errno is set.
1691  */
1692 static int
1693 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1694                                    const struct rte_flow_action *action,
1695                                    struct rte_flow_error *error)
1696 {
1697         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1698                 return rte_flow_error_set(error, EINVAL,
1699                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1700                                           NULL, "action configuration not set");
1701         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1702                 return rte_flow_error_set(error, EINVAL,
1703                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1704                                           "can't have encap action before"
1705                                           " modify action");
1706         return 0;
1707 }
1708
1709 /**
1710  * Validate the modify-header MAC address actions.
1711  *
1712  * @param[in] action_flags
1713  *   Holds the actions detected until now.
1714  * @param[in] action
1715  *   Pointer to the modify action.
1716  * @param[in] item_flags
1717  *   Holds the items detected.
1718  * @param[out] error
1719  *   Pointer to error structure.
1720  *
1721  * @return
1722  *   0 on success, a negative errno value otherwise and rte_errno is set.
1723  */
1724 static int
1725 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1726                                    const struct rte_flow_action *action,
1727                                    const uint64_t item_flags,
1728                                    struct rte_flow_error *error)
1729 {
1730         int ret = 0;
1731
1732         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1733         if (!ret) {
1734                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1735                         return rte_flow_error_set(error, EINVAL,
1736                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1737                                                   NULL,
1738                                                   "no L2 item in pattern");
1739         }
1740         return ret;
1741 }
1742
1743 /**
1744  * Validate the modify-header IPv4 address actions.
1745  *
1746  * @param[in] action_flags
1747  *   Holds the actions detected until now.
1748  * @param[in] action
1749  *   Pointer to the modify action.
1750  * @param[in] item_flags
1751  *   Holds the items detected.
1752  * @param[out] error
1753  *   Pointer to error structure.
1754  *
1755  * @return
1756  *   0 on success, a negative errno value otherwise and rte_errno is set.
1757  */
1758 static int
1759 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1760                                     const struct rte_flow_action *action,
1761                                     const uint64_t item_flags,
1762                                     struct rte_flow_error *error)
1763 {
1764         int ret = 0;
1765
1766         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1767         if (!ret) {
1768                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1769                         return rte_flow_error_set(error, EINVAL,
1770                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1771                                                   NULL,
1772                                                   "no ipv4 item in pattern");
1773         }
1774         return ret;
1775 }
1776
1777 /**
1778  * Validate the modify-header IPv6 address actions.
1779  *
1780  * @param[in] action_flags
1781  *   Holds the actions detected until now.
1782  * @param[in] action
1783  *   Pointer to the modify action.
1784  * @param[in] item_flags
1785  *   Holds the items detected.
1786  * @param[out] error
1787  *   Pointer to error structure.
1788  *
1789  * @return
1790  *   0 on success, a negative errno value otherwise and rte_errno is set.
1791  */
1792 static int
1793 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1794                                     const struct rte_flow_action *action,
1795                                     const uint64_t item_flags,
1796                                     struct rte_flow_error *error)
1797 {
1798         int ret = 0;
1799
1800         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1801         if (!ret) {
1802                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1803                         return rte_flow_error_set(error, EINVAL,
1804                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1805                                                   NULL,
1806                                                   "no ipv6 item in pattern");
1807         }
1808         return ret;
1809 }
1810
1811 /**
1812  * Validate the modify-header TP actions.
1813  *
1814  * @param[in] action_flags
1815  *   Holds the actions detected until now.
1816  * @param[in] action
1817  *   Pointer to the modify action.
1818  * @param[in] item_flags
1819  *   Holds the items detected.
1820  * @param[out] error
1821  *   Pointer to error structure.
1822  *
1823  * @return
1824  *   0 on success, a negative errno value otherwise and rte_errno is set.
1825  */
1826 static int
1827 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1828                                   const struct rte_flow_action *action,
1829                                   const uint64_t item_flags,
1830                                   struct rte_flow_error *error)
1831 {
1832         int ret = 0;
1833
1834         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1835         if (!ret) {
1836                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1837                         return rte_flow_error_set(error, EINVAL,
1838                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1839                                                   NULL, "no transport layer "
1840                                                   "in pattern");
1841         }
1842         return ret;
1843 }
1844
1845 /**
1846  * Validate the modify-header actions of increment/decrement
1847  * TCP Sequence-number.
1848  *
1849  * @param[in] action_flags
1850  *   Holds the actions detected until now.
1851  * @param[in] action
1852  *   Pointer to the modify action.
1853  * @param[in] item_flags
1854  *   Holds the items detected.
1855  * @param[out] error
1856  *   Pointer to error structure.
1857  *
1858  * @return
1859  *   0 on success, a negative errno value otherwise and rte_errno is set.
1860  */
1861 static int
1862 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1863                                        const struct rte_flow_action *action,
1864                                        const uint64_t item_flags,
1865                                        struct rte_flow_error *error)
1866 {
1867         int ret = 0;
1868
1869         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1870         if (!ret) {
1871                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1872                         return rte_flow_error_set(error, EINVAL,
1873                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1874                                                   NULL, "no TCP item in"
1875                                                   " pattern");
1876                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1877                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1878                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1879                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1880                         return rte_flow_error_set(error, EINVAL,
1881                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1882                                                   NULL,
1883                                                   "cannot decrease and increase"
1884                                                   " TCP sequence number"
1885                                                   " at the same time");
1886         }
1887         return ret;
1888 }
1889
1890 /**
1891  * Validate the modify-header actions of increment/decrement
1892  * TCP Acknowledgment number.
1893  *
1894  * @param[in] action_flags
1895  *   Holds the actions detected until now.
1896  * @param[in] action
1897  *   Pointer to the modify action.
1898  * @param[in] item_flags
1899  *   Holds the items detected.
1900  * @param[out] error
1901  *   Pointer to error structure.
1902  *
1903  * @return
1904  *   0 on success, a negative errno value otherwise and rte_errno is set.
1905  */
1906 static int
1907 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1908                                        const struct rte_flow_action *action,
1909                                        const uint64_t item_flags,
1910                                        struct rte_flow_error *error)
1911 {
1912         int ret = 0;
1913
1914         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1915         if (!ret) {
1916                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1917                         return rte_flow_error_set(error, EINVAL,
1918                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1919                                                   NULL, "no TCP item in"
1920                                                   " pattern");
1921                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1922                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1923                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1924                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1925                         return rte_flow_error_set(error, EINVAL,
1926                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1927                                                   NULL,
1928                                                   "cannot decrease and increase"
1929                                                   " TCP acknowledgment number"
1930                                                   " at the same time");
1931         }
1932         return ret;
1933 }
1934
1935 /**
1936  * Validate the modify-header TTL actions.
1937  *
1938  * @param[in] action_flags
1939  *   Holds the actions detected until now.
1940  * @param[in] action
1941  *   Pointer to the modify action.
1942  * @param[in] item_flags
1943  *   Holds the items detected.
1944  * @param[out] error
1945  *   Pointer to error structure.
1946  *
1947  * @return
1948  *   0 on success, a negative errno value otherwise and rte_errno is set.
1949  */
1950 static int
1951 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1952                                    const struct rte_flow_action *action,
1953                                    const uint64_t item_flags,
1954                                    struct rte_flow_error *error)
1955 {
1956         int ret = 0;
1957
1958         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1959         if (!ret) {
1960                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1961                         return rte_flow_error_set(error, EINVAL,
1962                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1963                                                   NULL,
1964                                                   "no IP protocol in pattern");
1965         }
1966         return ret;
1967 }
1968
1969 /**
1970  * Validate jump action.
1971  *
1972  * @param[in] action
1973  *   Pointer to the modify action.
1974  * @param[in] group
1975  *   The group of the current flow.
1976  * @param[out] error
1977  *   Pointer to error structure.
1978  *
1979  * @return
1980  *   0 on success, a negative errno value otherwise and rte_errno is set.
1981  */
1982 static int
1983 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1984                              uint32_t group,
1985                              struct rte_flow_error *error)
1986 {
1987         if (!action->conf)
1988                 return rte_flow_error_set(error, EINVAL,
1989                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1990                                           NULL, "action configuration not set");
1991         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1992                 return rte_flow_error_set(error, EINVAL,
1993                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1994                                           "target group must be higher than"
1995                                           " the current flow group");
1996         return 0;
1997 }
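
/*
 * Usage sketch (hypothetical): a jump from group 0 to group 1 - the target
 * group must be strictly greater than the current one, as validated above.
 *
 *	struct rte_flow_action_jump ex_jump = { .group = 1 };
 *	struct rte_flow_action ex_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &ex_jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */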
1998
1999 /**
2000  * Validate the port_id action.
2001  *
2002  * @param[in] dev
2003  *   Pointer to rte_eth_dev structure.
2004  * @param[in] action_flags
2005  *   Bit-fields that holds the actions detected until now.
2006  * @param[in] action
2007  *   Port_id RTE action structure.
2008  * @param[in] attr
2009  *   Attributes of flow that includes this action.
2010  * @param[out] error
2011  *   Pointer to error structure.
2012  *
2013  * @return
2014  *   0 on success, a negative errno value otherwise and rte_errno is set.
2015  */
2016 static int
2017 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
2018                                 uint64_t action_flags,
2019                                 const struct rte_flow_action *action,
2020                                 const struct rte_flow_attr *attr,
2021                                 struct rte_flow_error *error)
2022 {
2023         const struct rte_flow_action_port_id *port_id;
2024         uint16_t port;
2025         uint16_t esw_domain_id;
2026         uint16_t act_port_domain_id;
2027         int ret;
2028
2029         if (!attr->transfer)
2030                 return rte_flow_error_set(error, ENOTSUP,
2031                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2032                                           NULL,
2033                                           "port id action is valid in transfer"
2034                                           " mode only");
2035         if (!action || !action->conf)
2036                 return rte_flow_error_set(error, ENOTSUP,
2037                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2038                                           NULL,
2039                                           "port id action parameters must be"
2040                                           " specified");
2041         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2042                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2043                 return rte_flow_error_set(error, EINVAL,
2044                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2045                                           "can have only one fate action in"
2046                                           " a flow");
2047         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2048                                         &esw_domain_id, NULL);
2049         if (ret < 0)
2050                 return rte_flow_error_set(error, -ret,
2051                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2052                                           NULL,
2053                                           "failed to obtain E-Switch info");
2054         port_id = action->conf;
2055         port = port_id->original ? dev->data->port_id : port_id->id;
2056         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2057         if (ret)
2058                 return rte_flow_error_set
2059                                 (error, -ret,
2060                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2061                                  "failed to obtain E-Switch port id for port");
2062         if (act_port_domain_id != esw_domain_id)
2063                 return rte_flow_error_set
2064                                 (error, EINVAL,
2065                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2066                                  "port does not belong to"
2067                                  " E-Switch being configured");
2068         return 0;
2069 }
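
/*
 * Usage sketch (hypothetical): redirecting matched packets to another port
 * of the same E-Switch; only valid with attr.transfer = 1, as checked above.
 *
 *	struct rte_flow_attr ex_attr = { .ingress = 1, .transfer = 1 };
 *	struct rte_flow_action_port_id ex_port = { .id = 1 };
 *	struct rte_flow_action ex_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &ex_port },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */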
2070
2071 /**
2072  * Find existing modify-header resource or create and register a new one.
2073  *
2074  * @param[in, out] dev
2075  *   Pointer to rte_eth_dev structure.
2076  * @param[in, out] resource
2077  *   Pointer to modify-header resource.
2078  * @param[in, out] dev_flow
2079  *   Pointer to the dev_flow.
2080  * @param[out] error
2081  *   Pointer to error structure.
2082  *
2083  * @return
2084  *   0 on success, a negative errno value otherwise and rte_errno is set.
2085  */
2086 static int
2087 flow_dv_modify_hdr_resource_register
2088                         (struct rte_eth_dev *dev,
2089                          struct mlx5_flow_dv_modify_hdr_resource *resource,
2090                          struct mlx5_flow *dev_flow,
2091                          struct rte_flow_error *error)
2092 {
2093         struct mlx5_priv *priv = dev->data->dev_private;
2094         struct mlx5_ibv_shared *sh = priv->sh;
2095         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
2096         struct mlx5dv_dr_domain *ns;
2097
2098         if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
2099                 ns = sh->fdb_domain;
2100         else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
2101                 ns = sh->tx_domain;
2102         else
2103                 ns = sh->rx_domain;
2104         resource->flags =
2105                 dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
2106         /* Lookup a matching resource from cache. */
2107         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
2108                 if (resource->ft_type == cache_resource->ft_type &&
2109                     resource->actions_num == cache_resource->actions_num &&
2110                     resource->flags == cache_resource->flags &&
2111                     !memcmp((const void *)resource->actions,
2112                             (const void *)cache_resource->actions,
2113                             (resource->actions_num *
2114                                             sizeof(resource->actions[0])))) {
2115                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
2116                                 (void *)cache_resource,
2117                                 rte_atomic32_read(&cache_resource->refcnt));
2118                         rte_atomic32_inc(&cache_resource->refcnt);
2119                         dev_flow->dv.modify_hdr = cache_resource;
2120                         return 0;
2121                 }
2122         }
2123         /* Register new modify-header resource. */
2124         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
2125         if (!cache_resource)
2126                 return rte_flow_error_set(error, ENOMEM,
2127                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2128                                           "cannot allocate resource memory");
2129         *cache_resource = *resource;
2130         cache_resource->verbs_action =
2131                 mlx5_glue->dv_create_flow_action_modify_header
2132                                         (sh->ctx, cache_resource->ft_type,
2133                                          ns, cache_resource->flags,
2134                                          cache_resource->actions_num *
2135                                          sizeof(cache_resource->actions[0]),
2136                                          (uint64_t *)cache_resource->actions);
2137         if (!cache_resource->verbs_action) {
2138                 rte_free(cache_resource);
2139                 return rte_flow_error_set(error, ENOMEM,
2140                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2141                                           NULL, "cannot create action");
2142         }
2143         rte_atomic32_init(&cache_resource->refcnt);
2144         rte_atomic32_inc(&cache_resource->refcnt);
2145         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
2146         dev_flow->dv.modify_hdr = cache_resource;
2147         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
2148                 (void *)cache_resource,
2149                 rte_atomic32_read(&cache_resource->refcnt));
2150         return 0;
2151 }
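
/*
 * Usage sketch (hypothetical): a SET_IPV4_SRC action setting the source
 * address to 10.0.0.1 is translated into a single modify-header command;
 * flows issuing identical command arrays on the same table type share one
 * cached resource through the lookup above.
 *
 *	struct rte_flow_action_set_ipv4 ex_src = {
 *		.ipv4_addr = RTE_BE32(0x0a000001),
 *	};
 *	struct rte_flow_action ex_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, .conf = &ex_src },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */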
2152
2153 #define MLX5_CNT_CONTAINER_RESIZE 64
2154
2155 /**
2156  * Get or create a flow counter.
2157  *
2158  * @param[in] dev
2159  *   Pointer to the Ethernet device structure.
2160  * @param[in] shared
2161  *   Indicate if this counter is shared with other flows.
2162  * @param[in] id
2163  *   Counter identifier.
2164  *
2165  * @return
2166  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
2167  */
2168 static struct mlx5_flow_counter *
2169 flow_dv_counter_alloc_fallback(struct rte_eth_dev *dev, uint32_t shared,
2170                                uint32_t id)
2171 {
2172         struct mlx5_priv *priv = dev->data->dev_private;
2173         struct mlx5_flow_counter *cnt = NULL;
2174         struct mlx5_devx_obj *dcs = NULL;
2175
2176         if (!priv->config.devx) {
2177                 rte_errno = ENOTSUP;
2178                 return NULL;
2179         }
2180         if (shared) {
2181                 TAILQ_FOREACH(cnt, &priv->sh->cmng.flow_counters, next) {
2182                         if (cnt->shared && cnt->id == id) {
2183                                 cnt->ref_cnt++;
2184                                 return cnt;
2185                         }
2186                 }
2187         }
2188         dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2189         if (!dcs)
2190                 return NULL;
2191         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2192         if (!cnt) {
2193                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2194                 rte_errno = ENOMEM;
2195                 return NULL;
2196         }
2197         struct mlx5_flow_counter tmpl = {
2198                 .shared = shared,
2199                 .ref_cnt = 1,
2200                 .id = id,
2201                 .dcs = dcs,
2202         };
2203         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2204         if (!tmpl.action) {
2205                 claim_zero(mlx5_devx_cmd_destroy(dcs));
2206                 rte_errno = errno;
2207                 rte_free(cnt);
2208                 return NULL;
2209         }
2210         *cnt = tmpl;
2211         TAILQ_INSERT_HEAD(&priv->sh->cmng.flow_counters, cnt, next);
2212         return cnt;
2213 }
2214
2215 /**
2216  * Release a flow counter.
2217  *
2218  * @param[in] dev
2219  *   Pointer to the Ethernet device structure.
2220  * @param[in] counter
2221  *   Pointer to the counter handler.
2222  */
2223 static void
2224 flow_dv_counter_release_fallback(struct rte_eth_dev *dev,
2225                                  struct mlx5_flow_counter *counter)
2226 {
2227         struct mlx5_priv *priv = dev->data->dev_private;
2228
2229         if (!counter)
2230                 return;
2231         if (--counter->ref_cnt == 0) {
2232                 TAILQ_REMOVE(&priv->sh->cmng.flow_counters, counter, next);
2233                 claim_zero(mlx5_devx_cmd_destroy(counter->dcs));
2234                 rte_free(counter);
2235         }
2236 }
2237
2238 /**
2239  * Query a devx flow counter.
2240  *
2241  * @param[in] dev
2242  *   Pointer to the Ethernet device structure.
2243  * @param[in] cnt
2244  *   Pointer to the flow counter.
2245  * @param[out] pkts
2246  *   The statistics value of packets.
2247  * @param[out] bytes
2248  *   The statistics value of bytes.
2249  *
2250  * @return
2251  *   0 on success, otherwise a negative errno value and rte_errno is set.
2252  */
2253 static inline int
2254 _flow_dv_query_count_fallback(struct rte_eth_dev *dev __rte_unused,
2255                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2256                      uint64_t *bytes)
2257 {
2258         return mlx5_devx_cmd_flow_counter_query(cnt->dcs, 0, 0, pkts, bytes,
2259                                                 0, NULL, NULL, 0);
2260 }
2261
2262 /**
2263  * Get a pool by a counter.
2264  *
2265  * @param[in] cnt
2266  *   Pointer to the counter.
2267  *
2268  * @return
2269  *   The counter pool.
2270  */
2271 static struct mlx5_flow_counter_pool *
2272 flow_dv_counter_pool_get(struct mlx5_flow_counter *cnt)
2273 {
2274         if (!cnt->batch) {
2275                 cnt -= cnt->dcs->id % MLX5_COUNTERS_PER_POOL;
2276                 return (struct mlx5_flow_counter_pool *)cnt - 1;
2277         }
2278         return cnt->pool;
2279 }
2280
2281 /**
2282  * Get a pool by devx counter ID.
2283  *
2284  * @param[in] cont
2285  *   Pointer to the counter container.
2286  * @param[in] id
2287  *   The counter devx ID.
2288  *
2289  * @return
2290  *   The counter pool pointer if it exists, NULL otherwise.
2291  */
2292 static struct mlx5_flow_counter_pool *
2293 flow_dv_find_pool_by_id(struct mlx5_pools_container *cont, int id)
2294 {
2295         struct mlx5_flow_counter_pool *pool;
2296
2297         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2298                 int base = (pool->min_dcs->id / MLX5_COUNTERS_PER_POOL) *
2299                                 MLX5_COUNTERS_PER_POOL;
2300
2301                 if (id >= base && id < base + MLX5_COUNTERS_PER_POOL)
2302                         return pool;
2303         }
2304         return NULL;
2305 }
2306
2307 /**
2308  * Allocate memory for the counter values, wrapped with all the needed
2309  * management structures.
2310  *
2311  * @param[in] dev
2312  *   Pointer to the Ethernet device structure.
2313  * @param[in] raws_n
2314  *   The number of raw memory areas, each holding MLX5_COUNTERS_PER_POOL counters.
2315  *
2316  * @return
2317  *   The new memory management pointer on success, otherwise NULL and rte_errno
2318  *   is set.
2319  */
2320 static struct mlx5_counter_stats_mem_mng *
2321 flow_dv_create_counter_stat_mem_mng(struct rte_eth_dev *dev, int raws_n)
2322 {
2323         struct mlx5_ibv_shared *sh = ((struct mlx5_priv *)
2324                                         (dev->data->dev_private))->sh;
2325         struct mlx5_devx_mkey_attr mkey_attr;
2326         struct mlx5_counter_stats_mem_mng *mem_mng;
2327         volatile struct flow_counter_stats *raw_data;
2328         int size = (sizeof(struct flow_counter_stats) *
2329                         MLX5_COUNTERS_PER_POOL +
2330                         sizeof(struct mlx5_counter_stats_raw)) * raws_n +
2331                         sizeof(struct mlx5_counter_stats_mem_mng);
2332         uint8_t *mem = rte_calloc(__func__, 1, size, sysconf(_SC_PAGESIZE));
2333         int i;
2334
2335         if (!mem) {
2336                 rte_errno = ENOMEM;
2337                 return NULL;
2338         }
2339         mem_mng = (struct mlx5_counter_stats_mem_mng *)(mem + size) - 1;
2340         size = sizeof(*raw_data) * MLX5_COUNTERS_PER_POOL * raws_n;
2341         mem_mng->umem = mlx5_glue->devx_umem_reg(sh->ctx, mem, size,
2342                                                  IBV_ACCESS_LOCAL_WRITE);
2343         if (!mem_mng->umem) {
2344                 rte_errno = errno;
2345                 rte_free(mem);
2346                 return NULL;
2347         }
2348         mkey_attr.addr = (uintptr_t)mem;
2349         mkey_attr.size = size;
2350         mkey_attr.umem_id = mem_mng->umem->umem_id;
2351         mkey_attr.pd = sh->pdn;
2352         mem_mng->dm = mlx5_devx_cmd_mkey_create(sh->ctx, &mkey_attr);
2353         if (!mem_mng->dm) {
2354                 mlx5_glue->devx_umem_dereg(mem_mng->umem);
2355                 rte_errno = errno;
2356                 rte_free(mem);
2357                 return NULL;
2358         }
2359         mem_mng->raws = (struct mlx5_counter_stats_raw *)(mem + size);
2360         raw_data = (volatile struct flow_counter_stats *)mem;
2361         for (i = 0; i < raws_n; ++i) {
2362                 mem_mng->raws[i].mem_mng = mem_mng;
2363                 mem_mng->raws[i].data = raw_data + i * MLX5_COUNTERS_PER_POOL;
2364         }
2365         LIST_INSERT_HEAD(&sh->cmng.mem_mngs, mem_mng, next);
2366         return mem_mng;
2367 }
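
/*
 * Layout of the single allocation above:
 *
 *	raw_data - raws_n * MLX5_COUNTERS_PER_POOL counter records, registered
 *	           as a DevX umem and exposed through an mkey for FW writes;
 *	raws     - raws_n bookkeeping entries pointing into raw_data;
 *	mem_mng  - the management structure itself, placed at the very end.
 */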
2368
2369 /**
2370  * Resize a counter container.
2371  *
2372  * @param[in] dev
2373  *   Pointer to the Ethernet device structure.
2374  * @param[in] batch
2375  *   Whether the pool is for counters allocated by batch command.
2376  *
2377  * @return
2378  *   The new container pointer on success, otherwise NULL and rte_errno is set.
2379  */
2380 static struct mlx5_pools_container *
2381 flow_dv_container_resize(struct rte_eth_dev *dev, uint32_t batch)
2382 {
2383         struct mlx5_priv *priv = dev->data->dev_private;
2384         struct mlx5_pools_container *cont =
2385                         MLX5_CNT_CONTAINER(priv->sh, batch, 0);
2386         struct mlx5_pools_container *new_cont =
2387                         MLX5_CNT_CONTAINER_UNUSED(priv->sh, batch, 0);
2388         struct mlx5_counter_stats_mem_mng *mem_mng;
2389         uint32_t resize = cont->n + MLX5_CNT_CONTAINER_RESIZE;
2390         uint32_t mem_size = sizeof(struct mlx5_flow_counter_pool *) * resize;
2391         int i;
2392
2393         if (cont != MLX5_CNT_CONTAINER(priv->sh, batch, 1)) {
2394                 /* The last resize still hasn't been detected by the host thread. */
2395                 rte_errno = EAGAIN;
2396                 return NULL;
2397         }
2398         new_cont->pools = rte_calloc(__func__, 1, mem_size, 0);
2399         if (!new_cont->pools) {
2400                 rte_errno = ENOMEM;
2401                 return NULL;
2402         }
2403         if (cont->n)
2404                 memcpy(new_cont->pools, cont->pools, cont->n *
2405                        sizeof(struct mlx5_flow_counter_pool *));
2406         mem_mng = flow_dv_create_counter_stat_mem_mng(dev,
2407                 MLX5_CNT_CONTAINER_RESIZE + MLX5_MAX_PENDING_QUERIES);
2408         if (!mem_mng) {
2409                 rte_free(new_cont->pools);
2410                 return NULL;
2411         }
2412         for (i = 0; i < MLX5_MAX_PENDING_QUERIES; ++i)
2413                 LIST_INSERT_HEAD(&priv->sh->cmng.free_stat_raws,
2414                                  mem_mng->raws + MLX5_CNT_CONTAINER_RESIZE +
2415                                  i, next);
2416         new_cont->n = resize;
2417         rte_atomic16_set(&new_cont->n_valid, rte_atomic16_read(&cont->n_valid));
2418         TAILQ_INIT(&new_cont->pool_list);
2419         TAILQ_CONCAT(&new_cont->pool_list, &cont->pool_list, next);
2420         new_cont->init_mem_mng = mem_mng;
2421         rte_cio_wmb();
2422         /* Flip the master container. */
2423         priv->sh->cmng.mhi[batch] ^= (uint8_t)1;
2424         return new_cont;
2425 }
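
/*
 * The two containers per batch type form a double buffer: the resize fills
 * the unused copy, rte_cio_wmb() above publishes its contents, and the mhi
 * bit flip makes it the new master, so the host query thread never observes
 * a half-initialized pool array.
 */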
2426
2427 /**
2428  * Query a devx flow counter.
2429  *
2430  * @param[in] dev
2431  *   Pointer to the Ethernet device structure.
2432  * @param[in] cnt
2433  *   Pointer to the flow counter.
2434  * @param[out] pkts
2435  *   The statistics value of packets.
2436  * @param[out] bytes
2437  *   The statistics value of bytes.
2438  *
2439  * @return
2440  *   0 on success, otherwise a negative errno value and rte_errno is set.
2441  */
2442 static inline int
2443 _flow_dv_query_count(struct rte_eth_dev *dev,
2444                      struct mlx5_flow_counter *cnt, uint64_t *pkts,
2445                      uint64_t *bytes)
2446 {
2447         struct mlx5_priv *priv = dev->data->dev_private;
2448         struct mlx5_flow_counter_pool *pool =
2449                         flow_dv_counter_pool_get(cnt);
2450         int offset = cnt - &pool->counters_raw[0];
2451
2452         if (priv->counter_fallback)
2453                 return _flow_dv_query_count_fallback(dev, cnt, pkts, bytes);
2454
2455         rte_spinlock_lock(&pool->sl);
2456         /*
2457          * The single-counter allocation may return an ID smaller than the
2458          * one currently being read by the host thread in parallel.
2459          * In this case the new counter values must be reported as 0.
2460          */
2461         if (unlikely(!cnt->batch && cnt->dcs->id < pool->raw->min_dcs_id)) {
2462                 *pkts = 0;
2463                 *bytes = 0;
2464         } else {
2465                 *pkts = rte_be_to_cpu_64(pool->raw->data[offset].hits);
2466                 *bytes = rte_be_to_cpu_64(pool->raw->data[offset].bytes);
2467         }
2468         rte_spinlock_unlock(&pool->sl);
2469         return 0;
2470 }
2471
2472 /**
2473  * Create and initialize a new counter pool.
2474  *
2475  * @param[in] dev
2476  *   Pointer to the Ethernet device structure.
2477  * @param[out] dcs
2478  *   The devX counter handle.
2479  * @param[in] batch
2480  *   Whether the pool is for counters allocated by batch command.
2481  *
2482  * @return
2483  *   A new pool pointer on success, NULL otherwise and rte_errno is set.
2484  */
2485 static struct mlx5_flow_counter_pool *
2486 flow_dv_pool_create(struct rte_eth_dev *dev, struct mlx5_devx_obj *dcs,
2487                     uint32_t batch)
2488 {
2489         struct mlx5_priv *priv = dev->data->dev_private;
2490         struct mlx5_flow_counter_pool *pool;
2491         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2492                                                                0);
2493         int16_t n_valid = rte_atomic16_read(&cont->n_valid);
2494         uint32_t size;
2495
2496         if (cont->n == n_valid) {
2497                 cont = flow_dv_container_resize(dev, batch);
2498                 if (!cont)
2499                         return NULL;
2500         }
2501         size = sizeof(*pool) + MLX5_COUNTERS_PER_POOL *
2502                         sizeof(struct mlx5_flow_counter);
2503         pool = rte_calloc(__func__, 1, size, 0);
2504         if (!pool) {
2505                 rte_errno = ENOMEM;
2506                 return NULL;
2507         }
2508         pool->min_dcs = dcs;
2509         pool->raw = cont->init_mem_mng->raws + n_valid %
2510                                                      MLX5_CNT_CONTAINER_RESIZE;
2511         pool->raw_hw = NULL;
2512         rte_spinlock_init(&pool->sl);
2513         /*
2514          * Newly allocated counters in this pool have generation 0; setting the
2515          * pool query generation to 2 makes all of them valid for allocation.
2516          */
2517         rte_atomic64_set(&pool->query_gen, 0x2);
2518         TAILQ_INIT(&pool->counters);
2519         TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2520         cont->pools[n_valid] = pool;
2521         /* Pool initialization must be updated before host thread access. */
2522         rte_cio_wmb();
2523         rte_atomic16_add(&cont->n_valid, 1);
2524         return pool;
2525 }
2526
2527 /**
2528  * Prepare a new counter and/or a new counter pool.
2529  *
2530  * @param[in] dev
2531  *   Pointer to the Ethernet device structure.
2532  * @param[out] cnt_free
2533  *   Where to put the pointer of a new counter.
2534  * @param[in] batch
2535  *   Whether the pool is for counters allocated by batch command.
2536  *
2537  * @return
2538  *   The free counter pool pointer, with @p cnt_free set, on success,
2539  *   NULL otherwise and rte_errno is set.
2540  */
2541 static struct mlx5_flow_counter_pool *
2542 flow_dv_counter_pool_prepare(struct rte_eth_dev *dev,
2543                              struct mlx5_flow_counter **cnt_free,
2544                              uint32_t batch)
2545 {
2546         struct mlx5_priv *priv = dev->data->dev_private;
2547         struct mlx5_flow_counter_pool *pool;
2548         struct mlx5_devx_obj *dcs = NULL;
2549         struct mlx5_flow_counter *cnt;
2550         uint32_t i;
2551
2552         if (!batch) {
2553                 /* bulk_bitmap must be 0 for single counter allocation. */
2554                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0);
2555                 if (!dcs)
2556                         return NULL;
2557                 pool = flow_dv_find_pool_by_id
2558                         (MLX5_CNT_CONTAINER(priv->sh, batch, 0), dcs->id);
2559                 if (!pool) {
2560                         pool = flow_dv_pool_create(dev, dcs, batch);
2561                         if (!pool) {
2562                                 mlx5_devx_cmd_destroy(dcs);
2563                                 return NULL;
2564                         }
2565                 } else if (dcs->id < pool->min_dcs->id) {
2566                         rte_atomic64_set(&pool->a64_dcs,
2567                                          (int64_t)(uintptr_t)dcs);
2568                 }
2569                 cnt = &pool->counters_raw[dcs->id % MLX5_COUNTERS_PER_POOL];
2570                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2571                 cnt->dcs = dcs;
2572                 *cnt_free = cnt;
2573                 return pool;
2574         }
2575         /* bulk_bitmap is in 128 counters units. */
2576         if (priv->config.hca_attr.flow_counter_bulk_alloc_bitmap & 0x4)
2577                 dcs = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, 0x4);
2578         if (!dcs) {
2579                 rte_errno = ENODATA;
2580                 return NULL;
2581         }
2582         pool = flow_dv_pool_create(dev, dcs, batch);
2583         if (!pool) {
2584                 mlx5_devx_cmd_destroy(dcs);
2585                 return NULL;
2586         }
2587         for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2588                 cnt = &pool->counters_raw[i];
2589                 cnt->pool = pool;
2590                 TAILQ_INSERT_HEAD(&pool->counters, cnt, next);
2591         }
2592         *cnt_free = &pool->counters_raw[0];
2593         return pool;
2594 }
2595
2596 /**
2597  * Search for an existing shared counter.
2598  *
2599  * @param[in] cont
2600  *   Pointer to the relevant counter pool container.
2601  * @param[in] id
2602  *   The shared counter ID to search.
2603  *
2604  * @return
2605  *   NULL if it does not exist, otherwise a pointer to the shared counter.
2606  */
2607 static struct mlx5_flow_counter *
2608 flow_dv_counter_shared_search(struct mlx5_pools_container *cont,
2609                               uint32_t id)
2610 {
2611         struct mlx5_flow_counter *cnt;
2612         struct mlx5_flow_counter_pool *pool;
2613         int i;
2614
2615         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2616                 for (i = 0; i < MLX5_COUNTERS_PER_POOL; ++i) {
2617                         cnt = &pool->counters_raw[i];
2618                         if (cnt->ref_cnt && cnt->shared && cnt->id == id)
2619                                 return cnt;
2620                 }
2621         }
2622         return NULL;
2623 }
2624
2625 /**
2626  * Allocate a flow counter.
2627  *
2628  * @param[in] dev
2629  *   Pointer to the Ethernet device structure.
2630  * @param[in] shared
2631  *   Indicate if this counter is shared with other flows.
2632  * @param[in] id
2633  *   Counter identifier.
2634  * @param[in] group
2635  *   Counter flow group.
2636  *
2637  * @return
2638  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2639  */
2640 static struct mlx5_flow_counter *
2641 flow_dv_counter_alloc(struct rte_eth_dev *dev, uint32_t shared, uint32_t id,
2642                       uint16_t group)
2643 {
2644         struct mlx5_priv *priv = dev->data->dev_private;
2645         struct mlx5_flow_counter_pool *pool = NULL;
2646         struct mlx5_flow_counter *cnt_free = NULL;
2647         /*
2648          * Currently a group 0 flow counter cannot be assigned to a flow
2649          * if it is not the first one in a batch counter allocation, so it
2650          * is better to allocate counters one by one for these flows in a
2651          * separate container.
2652          * A counter can be shared between different groups, so shared
2653          * counters must be taken from the single-counter container.
2654          */
2655         uint32_t batch = (group && !shared) ? 1 : 0;
2656         struct mlx5_pools_container *cont = MLX5_CNT_CONTAINER(priv->sh, batch,
2657                                                                0);
2658
2659         if (priv->counter_fallback)
2660                 return flow_dv_counter_alloc_fallback(dev, shared, id);
2661         if (!priv->config.devx) {
2662                 rte_errno = ENOTSUP;
2663                 return NULL;
2664         }
2665         if (shared) {
2666                 cnt_free = flow_dv_counter_shared_search(cont, id);
2667                 if (cnt_free) {
2668                         if (cnt_free->ref_cnt + 1 == 0) {
2669                                 rte_errno = E2BIG;
2670                                 return NULL;
2671                         }
2672                         cnt_free->ref_cnt++;
2673                         return cnt_free;
2674                 }
2675         }
2676         /* Pools with free counters are at the start of the list. */
2677         TAILQ_FOREACH(pool, &cont->pool_list, next) {
2678                 /*
2679                  * A free counter's reset values must be updated between its
2680                  * release and its reallocation, so at least one query must
2681                  * be performed in that interval. Ensure this by saving the
2682                  * query generation at release time.
2683                  * The free list is sorted by generation, so if the first
2684                  * counter is not updated yet, none of the others are
2685                  * updated either.
2686                  */
2687                 cnt_free = TAILQ_FIRST(&pool->counters);
2688                 if (cnt_free && cnt_free->query_gen + 1 <
2689                     rte_atomic64_read(&pool->query_gen))
2690                         break;
2691                 cnt_free = NULL;
2692         }
2693         if (!cnt_free) {
2694                 pool = flow_dv_counter_pool_prepare(dev, &cnt_free, batch);
2695                 if (!pool)
2696                         return NULL;
2697         }
2698         cnt_free->batch = batch;
2699         /* Create a DV counter action only on first use. */
2700         if (!cnt_free->action) {
2701                 uint16_t offset;
2702                 struct mlx5_devx_obj *dcs;
2703
2704                 if (batch) {
2705                         offset = cnt_free - &pool->counters_raw[0];
2706                         dcs = pool->min_dcs;
2707                 } else {
2708                         offset = 0;
2709                         dcs = cnt_free->dcs;
2710                 }
2711                 cnt_free->action = mlx5_glue->dv_create_flow_action_counter
2712                                         (dcs->obj, offset);
2713                 if (!cnt_free->action) {
2714                         rte_errno = errno;
2715                         return NULL;
2716                 }
2717         }
2718         /* Update the counter reset values. */
2719         if (_flow_dv_query_count(dev, cnt_free, &cnt_free->hits,
2720                                  &cnt_free->bytes))
2721                 return NULL;
2722         cnt_free->shared = shared;
2723         cnt_free->ref_cnt = 1;
2724         cnt_free->id = id;
2725         if (!priv->sh->cmng.query_thread_on)
2726                 /* Start the asynchronous batch query by the host thread. */
2727                 mlx5_set_query_alarm(priv->sh);
2728         TAILQ_REMOVE(&pool->counters, cnt_free, next);
2729         if (TAILQ_EMPTY(&pool->counters)) {
2730                 /* Move the pool to the end of the container pool list. */
2731                 TAILQ_REMOVE(&cont->pool_list, pool, next);
2732                 TAILQ_INSERT_TAIL(&cont->pool_list, pool, next);
2733         }
2734         return cnt_free;
2735 }
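
/*
 * Illustrative sketch (not part of the driver): applications reach this
 * allocator through the generic rte_flow COUNT action. A shared counter
 * could be requested as follows; the ID value is hypothetical:
 *
 *     struct rte_flow_action_count count = {
 *             .shared = 1,
 *             .id = 42,
 *     };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_COUNT, .conf = &count },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * Two flows created with the same .id then share one counter and its
 * statistics, taking the flow_dv_counter_shared_search() path above.
 */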
2736
2737 /**
2738  * Release a flow counter.
2739  *
2740  * @param[in] dev
2741  *   Pointer to the Ethernet device structure.
2742  * @param[in] counter
2743  *   Pointer to the counter handler.
2744  */
2745 static void
2746 flow_dv_counter_release(struct rte_eth_dev *dev,
2747                         struct mlx5_flow_counter *counter)
2748 {
2749         struct mlx5_priv *priv = dev->data->dev_private;
2750
2751         if (!counter)
2752                 return;
2753         if (priv->counter_fallback) {
2754                 flow_dv_counter_release_fallback(dev, counter);
2755                 return;
2756         }
2757         if (--counter->ref_cnt == 0) {
2758                 struct mlx5_flow_counter_pool *pool =
2759                                 flow_dv_counter_pool_get(counter);
2760
2761                 /* Put the counter at the end - the most recently updated one. */
2762                 TAILQ_INSERT_TAIL(&pool->counters, counter, next);
2763                 counter->query_gen = rte_atomic64_read(&pool->query_gen);
2764         }
2765 }
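
/*
 * For illustration, the query-generation handshake between release and
 * allocation above: a counter released while the pool is at generation
 * G records query_gen = G, and flow_dv_counter_alloc() only reuses it
 * once G + 1 < pool generation, i.e. after at least one full
 * asynchronous query cycle has refreshed its reset values.
 */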
2766
2767 /**
2768  * Verify that the @p attributes will be correctly understood by the NIC
2769  * and are valid for this device.
2770  *
2771  * @param[in] dev
2772  *   Pointer to dev struct.
2773  * @param[in] attributes
2774  *   Pointer to flow attributes
2775  * @param[out] error
2776  *   Pointer to error structure.
2777  *
2778  * @return
2779  *   0 on success, a negative errno value otherwise and rte_errno is set.
2780  */
2781 static int
2782 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2783                             const struct rte_flow_attr *attributes,
2784                             struct rte_flow_error *error)
2785 {
2786         struct mlx5_priv *priv = dev->data->dev_private;
2787         uint32_t priority_max = priv->config.flow_prio - 1;
2788
2789 #ifndef HAVE_MLX5DV_DR
2790         if (attributes->group)
2791                 return rte_flow_error_set(error, ENOTSUP,
2792                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2793                                           NULL,
2794                                           "groups are not supported");
2795 #endif
2796         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2797             attributes->priority >= priority_max)
2798                 return rte_flow_error_set(error, ENOTSUP,
2799                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2800                                           NULL,
2801                                           "priority out of range");
2802         if (attributes->transfer) {
2803                 if (!priv->config.dv_esw_en)
2804                         return rte_flow_error_set
2805                                 (error, ENOTSUP,
2806                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2807                                  "E-Switch dr is not supported");
2808                 if (!(priv->representor || priv->master))
2809                         return rte_flow_error_set
2810                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2811                                  NULL, "E-Switch configuration can only be"
2812                                  " done by a master or a representor device");
2813                 if (attributes->egress)
2814                         return rte_flow_error_set
2815                                 (error, ENOTSUP,
2816                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2817                                  "egress is not supported");
2818                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2819                         return rte_flow_error_set
2820                                 (error, EINVAL,
2821                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2822                                  NULL, "group must be smaller than "
2823                                  RTE_STR(MLX5_MAX_TABLES_FDB));
2824         }
2825         if (!(attributes->egress ^ attributes->ingress))
2826                 return rte_flow_error_set(error, ENOTSUP,
2827                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2828                                           "must specify exactly one of "
2829                                           "ingress or egress");
2830         return 0;
2831 }
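
/*
 * Illustrative sketch (not part of the driver): attributes passing the
 * check above must select exactly one direction, e.g.:
 *
 *     struct rte_flow_attr attr = {
 *             .group = 1,
 *             .priority = 0,
 *             .ingress = 1,
 *     };
 *
 * Setting both .ingress and .egress, or neither, fails the final
 * (egress ^ ingress) test with ENOTSUP.
 */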
2832
2833 /**
2834  * Internal validation function. For validating both actions and items.
2835  *
2836  * @param[in] dev
2837  *   Pointer to the rte_eth_dev structure.
2838  * @param[in] attr
2839  *   Pointer to the flow attributes.
2840  * @param[in] items
2841  *   Pointer to the list of items.
2842  * @param[in] actions
2843  *   Pointer to the list of actions.
2844  * @param[out] error
2845  *   Pointer to the error structure.
2846  *
2847  * @return
2848  *   0 on success, a negative errno value otherwise and rte_errno is set.
2849  */
2850 static int
2851 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2852                  const struct rte_flow_item items[],
2853                  const struct rte_flow_action actions[],
2854                  struct rte_flow_error *error)
2855 {
2856         int ret;
2857         uint64_t action_flags = 0;
2858         uint64_t item_flags = 0;
2859         uint64_t last_item = 0;
2860         uint8_t next_protocol = 0xff;
2861         int actions_n = 0;
2862         const struct rte_flow_item *gre_item = NULL;
2863         struct rte_flow_item_tcp nic_tcp_mask = {
2864                 .hdr = {
2865                         .tcp_flags = 0xFF,
2866                         .src_port = RTE_BE16(UINT16_MAX),
2867                         .dst_port = RTE_BE16(UINT16_MAX),
2868                 }
2869         };
2870
2871         if (items == NULL)
2872                 return -1;
2873         ret = flow_dv_validate_attributes(dev, attr, error);
2874         if (ret < 0)
2875                 return ret;
2876         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2877                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2878                 switch (items->type) {
2879                 case RTE_FLOW_ITEM_TYPE_VOID:
2880                         break;
2881                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2882                         ret = flow_dv_validate_item_port_id
2883                                         (dev, items, attr, item_flags, error);
2884                         if (ret < 0)
2885                                 return ret;
2886                         last_item = MLX5_FLOW_ITEM_PORT_ID;
2887                         break;
2888                 case RTE_FLOW_ITEM_TYPE_ETH:
2889                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2890                                                           error);
2891                         if (ret < 0)
2892                                 return ret;
2893                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2894                                              MLX5_FLOW_LAYER_OUTER_L2;
2895                         break;
2896                 case RTE_FLOW_ITEM_TYPE_VLAN:
2897                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2898                                                            error);
2899                         if (ret < 0)
2900                                 return ret;
2901                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2902                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2903                         break;
2904                 case RTE_FLOW_ITEM_TYPE_IPV4:
2905                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2906                                                            NULL, error);
2907                         if (ret < 0)
2908                                 return ret;
2909                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2910                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2911                         if (items->mask != NULL &&
2912                             ((const struct rte_flow_item_ipv4 *)
2913                              items->mask)->hdr.next_proto_id) {
2914                                 next_protocol =
2915                                         ((const struct rte_flow_item_ipv4 *)
2916                                          (items->spec))->hdr.next_proto_id;
2917                                 next_protocol &=
2918                                         ((const struct rte_flow_item_ipv4 *)
2919                                          (items->mask))->hdr.next_proto_id;
2920                         } else {
2921                                 /* Reset for inner layer. */
2922                                 next_protocol = 0xff;
2923                         }
2924                         mlx5_flow_tunnel_ip_check(items, &last_item);
2925                         break;
2926                 case RTE_FLOW_ITEM_TYPE_IPV6:
2927                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2928                                                            NULL, error);
2929                         if (ret < 0)
2930                                 return ret;
2931                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2932                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2933                         if (items->mask != NULL &&
2934                             ((const struct rte_flow_item_ipv6 *)
2935                              items->mask)->hdr.proto) {
2936                                 next_protocol =
2937                                         ((const struct rte_flow_item_ipv6 *)
2938                                          items->spec)->hdr.proto;
2939                                 next_protocol &=
2940                                         ((const struct rte_flow_item_ipv6 *)
2941                                          items->mask)->hdr.proto;
2942                         } else {
2943                                 /* Reset for inner layer. */
2944                                 next_protocol = 0xff;
2945                         }
2946                         mlx5_flow_tunnel_ip_check(items, &last_item);
2947                         break;
2948                 case RTE_FLOW_ITEM_TYPE_TCP:
2949                         ret = mlx5_flow_validate_item_tcp
2950                                                 (items, item_flags,
2951                                                  next_protocol,
2952                                                  &nic_tcp_mask,
2953                                                  error);
2954                         if (ret < 0)
2955                                 return ret;
2956                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2957                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2958                         break;
2959                 case RTE_FLOW_ITEM_TYPE_UDP:
2960                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2961                                                           next_protocol,
2962                                                           error);
2963                         if (ret < 0)
2964                                 return ret;
2965                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2966                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2967                         break;
2968                 case RTE_FLOW_ITEM_TYPE_GRE:
2969                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2970                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2971                                                           next_protocol, error);
2972                         if (ret < 0)
2973                                 return ret;
2974                         gre_item = items;
2975                         last_item = MLX5_FLOW_LAYER_GRE;
2976                         break;
2977                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2978                         ret = mlx5_flow_validate_item_gre_key
2979                                 (items, item_flags, gre_item, error);
2980                         if (ret < 0)
2981                                 return ret;
2982                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
2983                         break;
2984                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2985                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2986                                                             error);
2987                         if (ret < 0)
2988                                 return ret;
2989                         last_item = MLX5_FLOW_LAYER_VXLAN;
2990                         break;
2991                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2992                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2993                                                                 item_flags, dev,
2994                                                                 error);
2995                         if (ret < 0)
2996                                 return ret;
2997                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2998                         break;
2999                 case RTE_FLOW_ITEM_TYPE_MPLS:
3000                         ret = mlx5_flow_validate_item_mpls(dev, items,
3001                                                            item_flags,
3002                                                            last_item, error);
3003                         if (ret < 0)
3004                                 return ret;
3005                         last_item = MLX5_FLOW_LAYER_MPLS;
3006                         break;
3007                 case RTE_FLOW_ITEM_TYPE_META:
3008                         ret = flow_dv_validate_item_meta(dev, items, attr,
3009                                                          error);
3010                         if (ret < 0)
3011                                 return ret;
3012                         last_item = MLX5_FLOW_ITEM_METADATA;
3013                         break;
3014                 case RTE_FLOW_ITEM_TYPE_ICMP:
3015                         ret = mlx5_flow_validate_item_icmp(items, item_flags,
3016                                                            next_protocol,
3017                                                            error);
3018                         if (ret < 0)
3019                                 return ret;
3020                         last_item = MLX5_FLOW_LAYER_ICMP;
3021                         break;
3022                 case RTE_FLOW_ITEM_TYPE_ICMP6:
3023                         ret = mlx5_flow_validate_item_icmp6(items, item_flags,
3024                                                             next_protocol,
3025                                                             error);
3026                         if (ret < 0)
3027                                 return ret;
3028                         last_item = MLX5_FLOW_LAYER_ICMP6;
3029                         break;
3030                 default:
3031                         return rte_flow_error_set(error, ENOTSUP,
3032                                                   RTE_FLOW_ERROR_TYPE_ITEM,
3033                                                   NULL, "item not supported");
3034                 }
3035                 item_flags |= last_item;
3036         }
3037         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
3038                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
3039                         return rte_flow_error_set(error, ENOTSUP,
3040                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3041                                                   actions, "too many actions");
3042                 switch (actions->type) {
3043                 case RTE_FLOW_ACTION_TYPE_VOID:
3044                         break;
3045                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3046                         ret = flow_dv_validate_action_port_id(dev,
3047                                                               action_flags,
3048                                                               actions,
3049                                                               attr,
3050                                                               error);
3051                         if (ret)
3052                                 return ret;
3053                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3054                         ++actions_n;
3055                         break;
3056                 case RTE_FLOW_ACTION_TYPE_FLAG:
3057                         ret = mlx5_flow_validate_action_flag(action_flags,
3058                                                              attr, error);
3059                         if (ret < 0)
3060                                 return ret;
3061                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3062                         ++actions_n;
3063                         break;
3064                 case RTE_FLOW_ACTION_TYPE_MARK:
3065                         ret = mlx5_flow_validate_action_mark(actions,
3066                                                              action_flags,
3067                                                              attr, error);
3068                         if (ret < 0)
3069                                 return ret;
3070                         action_flags |= MLX5_FLOW_ACTION_MARK;
3071                         ++actions_n;
3072                         break;
3073                 case RTE_FLOW_ACTION_TYPE_DROP:
3074                         ret = mlx5_flow_validate_action_drop(action_flags,
3075                                                              attr, error);
3076                         if (ret < 0)
3077                                 return ret;
3078                         action_flags |= MLX5_FLOW_ACTION_DROP;
3079                         ++actions_n;
3080                         break;
3081                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3082                         ret = mlx5_flow_validate_action_queue(actions,
3083                                                               action_flags, dev,
3084                                                               attr, error);
3085                         if (ret < 0)
3086                                 return ret;
3087                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3088                         ++actions_n;
3089                         break;
3090                 case RTE_FLOW_ACTION_TYPE_RSS:
3091                         ret = mlx5_flow_validate_action_rss(actions,
3092                                                             action_flags, dev,
3093                                                             attr, item_flags,
3094                                                             error);
3095                         if (ret < 0)
3096                                 return ret;
3097                         action_flags |= MLX5_FLOW_ACTION_RSS;
3098                         ++actions_n;
3099                         break;
3100                 case RTE_FLOW_ACTION_TYPE_COUNT:
3101                         ret = flow_dv_validate_action_count(dev, error);
3102                         if (ret < 0)
3103                                 return ret;
3104                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3105                         ++actions_n;
3106                         break;
3107                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3108                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3109                         ret = flow_dv_validate_action_l2_encap(action_flags,
3110                                                                actions, attr,
3111                                                                error);
3112                         if (ret < 0)
3113                                 return ret;
3114                         action_flags |= actions->type ==
3115                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3116                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3117                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3118                         ++actions_n;
3119                         break;
3120                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3121                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3122                         ret = flow_dv_validate_action_l2_decap(action_flags,
3123                                                                attr, error);
3124                         if (ret < 0)
3125                                 return ret;
3126                         action_flags |= actions->type ==
3127                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3128                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3129                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3130                         ++actions_n;
3131                         break;
3132                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3133                         ret = flow_dv_validate_action_raw_encap(action_flags,
3134                                                                 actions, attr,
3135                                                                 error);
3136                         if (ret < 0)
3137                                 return ret;
3138                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3139                         ++actions_n;
3140                         break;
3141                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3142                         ret = flow_dv_validate_action_raw_decap(action_flags,
3143                                                                 actions, attr,
3144                                                                 error);
3145                         if (ret < 0)
3146                                 return ret;
3147                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3148                         ++actions_n;
3149                         break;
3150                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3151                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3152                         ret = flow_dv_validate_action_modify_mac(action_flags,
3153                                                                  actions,
3154                                                                  item_flags,
3155                                                                  error);
3156                         if (ret < 0)
3157                                 return ret;
3158                         /* Count all modify-header actions as one action. */
3159                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3160                                 ++actions_n;
3161                         action_flags |= actions->type ==
3162                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3163                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
3164                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
3165                         break;
3167                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3168                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3169                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
3170                                                                   actions,
3171                                                                   item_flags,
3172                                                                   error);
3173                         if (ret < 0)
3174                                 return ret;
3175                         /* Count all modify-header actions as one action. */
3176                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3177                                 ++actions_n;
3178                         action_flags |= actions->type ==
3179                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3180                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
3181                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
3182                         break;
3183                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3184                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3185                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
3186                                                                   actions,
3187                                                                   item_flags,
3188                                                                   error);
3189                         if (ret < 0)
3190                                 return ret;
3191                         /* Count all modify-header actions as one action. */
3192                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3193                                 ++actions_n;
3194                         action_flags |= actions->type ==
3195                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3196                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
3197                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
3198                         break;
3199                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3200                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3201                         ret = flow_dv_validate_action_modify_tp(action_flags,
3202                                                                 actions,
3203                                                                 item_flags,
3204                                                                 error);
3205                         if (ret < 0)
3206                                 return ret;
3207                         /* Count all modify-header actions as one action. */
3208                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3209                                 ++actions_n;
3210                         action_flags |= actions->type ==
3211                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3212                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
3213                                                 MLX5_FLOW_ACTION_SET_TP_DST;
3214                         break;
3215                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3216                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3217                         ret = flow_dv_validate_action_modify_ttl(action_flags,
3218                                                                  actions,
3219                                                                  item_flags,
3220                                                                  error);
3221                         if (ret < 0)
3222                                 return ret;
3223                         /* Count all modify-header actions as one action. */
3224                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3225                                 ++actions_n;
3226                         action_flags |= actions->type ==
3227                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
3228                                                 MLX5_FLOW_ACTION_SET_TTL :
3229                                                 MLX5_FLOW_ACTION_DEC_TTL;
3230                         break;
3231                 case RTE_FLOW_ACTION_TYPE_JUMP:
3232                         ret = flow_dv_validate_action_jump(actions,
3233                                                            attr->group, error);
3234                         if (ret)
3235                                 return ret;
3236                         ++actions_n;
3237                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3238                         break;
3239                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
3240                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
3241                         ret = flow_dv_validate_action_modify_tcp_seq
3242                                                                 (action_flags,
3243                                                                  actions,
3244                                                                  item_flags,
3245                                                                  error);
3246                         if (ret < 0)
3247                                 return ret;
3248                         /* Count all modify-header actions as one action. */
3249                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3250                                 ++actions_n;
3251                         action_flags |= actions->type ==
3252                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
3253                                                 MLX5_FLOW_ACTION_INC_TCP_SEQ :
3254                                                 MLX5_FLOW_ACTION_DEC_TCP_SEQ;
3255                         break;
3256                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
3257                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
3258                         ret = flow_dv_validate_action_modify_tcp_ack
3259                                                                 (action_flags,
3260                                                                  actions,
3261                                                                  item_flags,
3262                                                                  error);
3263                         if (ret < 0)
3264                                 return ret;
3265                         /* Count all modify-header actions as one action. */
3266                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
3267                                 ++actions_n;
3268                         action_flags |= actions->type ==
3269                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
3270                                                 MLX5_FLOW_ACTION_INC_TCP_ACK :
3271                                                 MLX5_FLOW_ACTION_DEC_TCP_ACK;
3272                         break;
3273                 default:
3274                         return rte_flow_error_set(error, ENOTSUP,
3275                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3276                                                   actions,
3277                                                   "action not supported");
3278                 }
3279         }
3280         /* E-Switch has a few restrictions on using items and actions. */
3281         if (attr->transfer) {
3282                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
3283                         return rte_flow_error_set(error, ENOTSUP,
3284                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3285                                                   NULL,
3286                                                   "unsupported action FLAG");
3287                 if (action_flags & MLX5_FLOW_ACTION_MARK)
3288                         return rte_flow_error_set(error, ENOTSUP,
3289                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3290                                                   NULL,
3291                                                   "unsupported action MARK");
3292                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
3293                         return rte_flow_error_set(error, ENOTSUP,
3294                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3295                                                   NULL,
3296                                                   "unsupported action QUEUE");
3297                 if (action_flags & MLX5_FLOW_ACTION_RSS)
3298                         return rte_flow_error_set(error, ENOTSUP,
3299                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3300                                                   NULL,
3301                                                   "unsupported action RSS");
3302                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
3303                         return rte_flow_error_set(error, EINVAL,
3304                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3305                                                   actions,
3306                                                   "no fate action is found");
3307         } else {
3308                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
3309                         return rte_flow_error_set(error, EINVAL,
3310                                                   RTE_FLOW_ERROR_TYPE_ACTION,
3311                                                   actions,
3312                                                   "no fate action is found");
3313         }
3314         return 0;
3315 }
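
/*
 * Illustrative sketch (not part of the driver): a minimal ingress
 * pattern and action list that passes the validation above:
 *
 *     struct rte_flow_item items[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *
 * QUEUE supplies the mandatory fate action; a transfer (E-Switch) flow
 * would reject it and use PORT_ID or JUMP instead.
 */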
3316
3317 /**
3318  * Internal preparation function. Allocates the DV flow structure;
3319  * its size is constant.
3320  *
3321  * @param[in] attr
3322  *   Pointer to the flow attributes.
3323  * @param[in] items
3324  *   Pointer to the list of items.
3325  * @param[in] actions
3326  *   Pointer to the list of actions.
3327  * @param[out] error
3328  *   Pointer to the error structure.
3329  *
3330  * @return
3331  *   Pointer to mlx5_flow object on success,
3332  *   otherwise NULL and rte_errno is set.
3333  */
3334 static struct mlx5_flow *
3335 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
3336                 const struct rte_flow_item items[] __rte_unused,
3337                 const struct rte_flow_action actions[] __rte_unused,
3338                 struct rte_flow_error *error)
3339 {
3340         uint32_t size = sizeof(struct mlx5_flow);
3341         struct mlx5_flow *flow;
3342
3343         flow = rte_calloc(__func__, 1, size, 0);
3344         if (!flow) {
3345                 rte_flow_error_set(error, ENOMEM,
3346                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3347                                    "not enough memory to create flow");
3348                 return NULL;
3349         }
3350         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
3351         return flow;
3352 }
3353
3354 #ifndef NDEBUG
3355 /**
3356  * Sanity check for match mask and value. Similar to check_valid_spec() in
3357  * the kernel driver. If an unmasked bit is present in the value, it fails.
3358  *
3359  * @param match_mask
3360  *   Pointer to the match mask buffer.
3361  * @param match_value
3362  *   Pointer to the match value buffer.
3363  *
3364  * @return
3365  *   0 if valid, -EINVAL otherwise.
3366  */
3367 static int
3368 flow_dv_check_valid_spec(void *match_mask, void *match_value)
3369 {
3370         uint8_t *m = match_mask;
3371         uint8_t *v = match_value;
3372         unsigned int i;
3373
3374         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
3375                 if (v[i] & ~m[i]) {
3376                         DRV_LOG(ERR,
3377                                 "match_value differs from match_criteria"
3378                                 " %p[%u] != %p[%u]",
3379                                 match_value, i, match_mask, i);
3380                         return -EINVAL;
3381                 }
3382         }
3383         return 0;
3384 }
3385 #endif
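
/*
 * For illustration: with a mask byte of 0x0f, a value byte of 0x1f
 * fails the check above (0x1f & ~0x0f == 0x10, an unmasked bit is
 * set), while 0x0a passes. The translate functions below therefore
 * always AND the spec with the mask before writing the value buffer.
 */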
3386
3387 /**
3388  * Add Ethernet item to matcher and to the value.
3389  *
3390  * @param[in, out] matcher
3391  *   Flow matcher.
3392  * @param[in, out] key
3393  *   Flow matcher value.
3394  * @param[in] item
3395  *   Flow pattern to translate.
3396  * @param[in] inner
3397  *   Item is inner pattern.
3398  */
3399 static void
3400 flow_dv_translate_item_eth(void *matcher, void *key,
3401                            const struct rte_flow_item *item, int inner)
3402 {
3403         const struct rte_flow_item_eth *eth_m = item->mask;
3404         const struct rte_flow_item_eth *eth_v = item->spec;
3405         const struct rte_flow_item_eth nic_mask = {
3406                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3407                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
3408                 .type = RTE_BE16(0xffff),
3409         };
3410         void *headers_m;
3411         void *headers_v;
3412         char *l24_v;
3413         unsigned int i;
3414
3415         if (!eth_v)
3416                 return;
3417         if (!eth_m)
3418                 eth_m = &nic_mask;
3419         if (inner) {
3420                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3421                                          inner_headers);
3422                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3423         } else {
3424                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3425                                          outer_headers);
3426                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3427         }
3428         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
3429                &eth_m->dst, sizeof(eth_m->dst));
3430         /* The value must be in the range of the mask. */
3431         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
3432         for (i = 0; i < sizeof(eth_m->dst); ++i)
3433                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
3434         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
3435                &eth_m->src, sizeof(eth_m->src));
3436         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
3437         /* The value must be in the range of the mask. */
3438         for (i = 0; i < sizeof(eth_m->src); ++i)
3439                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
3440         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
3441                  rte_be_to_cpu_16(eth_m->type));
3442         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
3443         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
3444 }
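
/*
 * For illustration: a destination MAC spec of 01:02:03:04:05:06 under
 * mask ff:ff:ff:00:00:00 is written into the value buffer as
 * 01:02:03:00:00:00 - the per-byte AND above keeps the value within
 * the mask, as required by flow_dv_check_valid_spec().
 */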
3445
3446 /**
3447  * Add VLAN item to matcher and to the value.
3448  *
3449  * @param[in, out] matcher
3450  *   Flow matcher.
3451  * @param[in, out] key
3452  *   Flow matcher value.
3453  * @param[in] item
3454  *   Flow pattern to translate.
3455  * @param[in] inner
3456  *   Item is inner pattern.
3457  */
3458 static void
3459 flow_dv_translate_item_vlan(void *matcher, void *key,
3460                             const struct rte_flow_item *item,
3461                             int inner)
3462 {
3463         const struct rte_flow_item_vlan *vlan_m = item->mask;
3464         const struct rte_flow_item_vlan *vlan_v = item->spec;
3465         const struct rte_flow_item_vlan nic_mask = {
3466                 .tci = RTE_BE16(0x0fff),
3467                 .inner_type = RTE_BE16(0xffff),
3468         };
3469         void *headers_m;
3470         void *headers_v;
3471         uint16_t tci_m;
3472         uint16_t tci_v;
3473
3474         if (!vlan_v)
3475                 return;
3476         if (!vlan_m)
3477                 vlan_m = &nic_mask;
3478         if (inner) {
3479                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3480                                          inner_headers);
3481                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3482         } else {
3483                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3484                                          outer_headers);
3485                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3486         }
3487         tci_m = rte_be_to_cpu_16(vlan_m->tci);
3488         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
3489         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
3490         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
3491         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
3492         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
3493         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
3494         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
3495         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
3496         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
3497 }
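
/*
 * The shifts above follow the 802.1Q TCI layout - PCP in bits 15:13,
 * CFI/DEI in bit 12 and the VLAN ID in bits 11:0 - so tci >> 13 yields
 * first_prio, tci >> 12 yields first_cfi, and first_vid takes the low
 * 12 bits, since MLX5_SET() truncates each argument to its field
 * width.
 */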
3498
3499 /**
3500  * Add IPV4 item to matcher and to the value.
3501  *
3502  * @param[in, out] matcher
3503  *   Flow matcher.
3504  * @param[in, out] key
3505  *   Flow matcher value.
3506  * @param[in] item
3507  *   Flow pattern to translate.
3508  * @param[in] inner
3509  *   Item is inner pattern.
3510  * @param[in] group
3511  *   The group to insert the rule.
3512  */
3513 static void
3514 flow_dv_translate_item_ipv4(void *matcher, void *key,
3515                             const struct rte_flow_item *item,
3516                             int inner, uint32_t group)
3517 {
3518         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
3519         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
3520         const struct rte_flow_item_ipv4 nic_mask = {
3521                 .hdr = {
3522                         .src_addr = RTE_BE32(0xffffffff),
3523                         .dst_addr = RTE_BE32(0xffffffff),
3524                         .type_of_service = 0xff,
3525                         .next_proto_id = 0xff,
3526                 },
3527         };
3528         void *headers_m;
3529         void *headers_v;
3530         char *l24_m;
3531         char *l24_v;
3532         uint8_t tos;
3533
3534         if (inner) {
3535                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3536                                          inner_headers);
3537                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3538         } else {
3539                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3540                                          outer_headers);
3541                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3542         }
3543         if (group == 0)
3544                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3545         else
3546                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
3547         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
3548         if (!ipv4_v)
3549                 return;
3550         if (!ipv4_m)
3551                 ipv4_m = &nic_mask;
3552         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3553                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3554         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3555                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
3556         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
3557         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
3558         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3559                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
3560         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3561                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
3562         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
3563         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
3564         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
3565         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
3566                  ipv4_m->hdr.type_of_service);
3567         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
3568         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
3569                  ipv4_m->hdr.type_of_service >> 2);
3570         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
3571         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3572                  ipv4_m->hdr.next_proto_id);
3573         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3574                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
3575 }
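
/*
 * The TOS handling above relies on the DS field layout: DSCP occupies
 * bits 7:2 and ECN bits 1:0 of type_of_service, so tos >> 2 yields the
 * DSCP value while MLX5_SET() on the 2-bit ip_ecn field keeps only the
 * ECN bits of the full byte. The ip_version mask is widened to 0xf on
 * group 0, presumably because the root table does not support a
 * partial mask on this field.
 */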
3576
3577 /**
3578  * Add IPV6 item to matcher and to the value.
3579  *
3580  * @param[in, out] matcher
3581  *   Flow matcher.
3582  * @param[in, out] key
3583  *   Flow matcher value.
3584  * @param[in] item
3585  *   Flow pattern to translate.
3586  * @param[in] inner
3587  *   Item is inner pattern.
3588  * @param[in] group
3589  *   The group to insert the rule.
3590  */
3591 static void
3592 flow_dv_translate_item_ipv6(void *matcher, void *key,
3593                             const struct rte_flow_item *item,
3594                             int inner, uint32_t group)
3595 {
3596         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
3597         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
3598         const struct rte_flow_item_ipv6 nic_mask = {
3599                 .hdr = {
3600                         .src_addr =
3601                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
3602                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
3603                         .dst_addr =
3604                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
3605                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
3606                         .vtc_flow = RTE_BE32(0xffffffff),
3607                         .proto = 0xff,
3608                         .hop_limits = 0xff,
3609                 },
3610         };
3611         void *headers_m;
3612         void *headers_v;
3613         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3614         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3615         char *l24_m;
3616         char *l24_v;
3617         uint32_t vtc_m;
3618         uint32_t vtc_v;
3619         int i;
3620         int size;
3621
3622         if (inner) {
3623                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3624                                          inner_headers);
3625                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3626         } else {
3627                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3628                                          outer_headers);
3629                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3630         }
3631         if (group == 0)
3632                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
3633         else
3634                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
3635         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
3636         if (!ipv6_v)
3637                 return;
3638         if (!ipv6_m)
3639                 ipv6_m = &nic_mask;
3640         size = sizeof(ipv6_m->hdr.dst_addr);
3641         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3642                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3643         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3644                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
3645         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
3646         for (i = 0; i < size; ++i)
3647                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
3648         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
3649                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
3650         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
3651                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
3652         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
3653         for (i = 0; i < size; ++i)
3654                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
3655         /* TOS. */
3656         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
3657         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
3658         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
3659         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
3660         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
3661         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
3662         /* Label. */
3663         if (inner) {
3664                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
3665                          vtc_m);
3666                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
3667                          vtc_v);
3668         } else {
3669                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
3670                          vtc_m);
3671                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
3672                          vtc_v);
3673         }
3674         /* Protocol. */
3675         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
3676                  ipv6_m->hdr.proto);
3677         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3678                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
3679 }
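
/*
 * The vtc_flow shifts above follow the IPv6 header layout - version in
 * bits 31:28, traffic class in bits 27:20 (DSCP in 27:22, ECN in
 * 21:20) and the flow label in bits 19:0 - so vtc >> 20 exposes the
 * ECN bits, vtc >> 22 the DSCP bits, and MLX5_SET() field truncation
 * keeps the low 20 label bits.
 */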
3680
3681 /**
3682  * Add TCP item to matcher and to the value.
3683  *
3684  * @param[in, out] matcher
3685  *   Flow matcher.
3686  * @param[in, out] key
3687  *   Flow matcher value.
3688  * @param[in] item
3689  *   Flow pattern to translate.
3690  * @param[in] inner
3691  *   Item is inner pattern.
3692  */
3693 static void
3694 flow_dv_translate_item_tcp(void *matcher, void *key,
3695                            const struct rte_flow_item *item,
3696                            int inner)
3697 {
3698         const struct rte_flow_item_tcp *tcp_m = item->mask;
3699         const struct rte_flow_item_tcp *tcp_v = item->spec;
3700         void *headers_m;
3701         void *headers_v;
3702
3703         if (inner) {
3704                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3705                                          inner_headers);
3706                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3707         } else {
3708                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3709                                          outer_headers);
3710                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3711         }
3712         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3713         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3714         if (!tcp_v)
3715                 return;
3716         if (!tcp_m)
3717                 tcp_m = &rte_flow_item_tcp_mask;
3718         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3719                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
3720         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3721                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3722         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3723                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3724         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3725                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3726         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3727                  tcp_m->hdr.tcp_flags);
3728         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3729                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3730 }
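
/*
 * Illustrative application-side sketch (not part of the driver) of a
 * pattern item as consumed by the function above; the variable names
 * are local to this example:
 *
 *	struct rte_flow_item_tcp tcp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(80) },
 *	};
 *	struct rte_flow_item_tcp tcp_mask = {
 *		.hdr = { .dst_port = RTE_BE16(0xffff) },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_TCP,
 *		.spec = &tcp_spec,
 *		.mask = &tcp_mask,
 *	};
 *
 * A NULL spec matches on the IP protocol only; a NULL mask falls back
 * to rte_flow_item_tcp_mask. The UDP translation below follows the same
 * convention.
 */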
3731
3732 /**
3733  * Add UDP item to matcher and to the value.
3734  *
3735  * @param[in, out] matcher
3736  *   Flow matcher.
3737  * @param[in, out] key
3738  *   Flow matcher value.
3739  * @param[in] item
3740  *   Flow pattern to translate.
3741  * @param[in] inner
3742  *   Item is inner pattern.
3743  */
3744 static void
3745 flow_dv_translate_item_udp(void *matcher, void *key,
3746                            const struct rte_flow_item *item,
3747                            int inner)
3748 {
3749         const struct rte_flow_item_udp *udp_m = item->mask;
3750         const struct rte_flow_item_udp *udp_v = item->spec;
3751         void *headers_m;
3752         void *headers_v;
3753
3754         if (inner) {
3755                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3756                                          inner_headers);
3757                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3758         } else {
3759                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3760                                          outer_headers);
3761                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3762         }
3763         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3764         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3765         if (!udp_v)
3766                 return;
3767         if (!udp_m)
3768                 udp_m = &rte_flow_item_udp_mask;
3769         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3770                  rte_be_to_cpu_16(udp_m->hdr.src_port));
3771         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3772                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3773         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3774                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
3775         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3776                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3777 }
3778
3779 /**
3780  * Add GRE optional Key item to matcher and to the value.
3781  *
3782  * @param[in, out] matcher
3783  *   Flow matcher.
3784  * @param[in, out] key
3785  *   Flow matcher value.
3786  * @param[in] item
3787  *   Flow pattern to translate.
3790  */
3791 static void
3792 flow_dv_translate_item_gre_key(void *matcher, void *key,
3793                                    const struct rte_flow_item *item)
3794 {
3795         const rte_be32_t *key_m = item->mask;
3796         const rte_be32_t *key_v = item->spec;
3797         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3798         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3799         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3800
3801         if (!key_v)
3802                 return;
3803         if (!key_m)
3804                 key_m = &gre_key_default_mask;
3805         /* GRE K bit must be on and should already be validated */
3806         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3807         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3808         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3809                  rte_be_to_cpu_32(*key_m) >> 8);
3810         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3811                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3812         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3813                  rte_be_to_cpu_32(*key_m) & 0xFF);
3814         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3815                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3816 }
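
/*
 * The PRM splits the 32-bit GRE key across two fields: gre_key_h holds
 * the upper 24 bits and gre_key_l the lower 8, hence the ">> 8" and
 * "& 0xFF" above. For example, a key of 0x11223344 is programmed as
 * gre_key_h = 0x112233 and gre_key_l = 0x44.
 */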
3817
3818 /**
3819  * Add GRE item to matcher and to the value.
3820  *
3821  * @param[in, out] matcher
3822  *   Flow matcher.
3823  * @param[in, out] key
3824  *   Flow matcher value.
3825  * @param[in] item
3826  *   Flow pattern to translate.
3827  * @param[in] inner
3828  *   Item is inner pattern.
3829  */
3830 static void
3831 flow_dv_translate_item_gre(void *matcher, void *key,
3832                            const struct rte_flow_item *item,
3833                            int inner)
3834 {
3835         const struct rte_flow_item_gre *gre_m = item->mask;
3836         const struct rte_flow_item_gre *gre_v = item->spec;
3837         void *headers_m;
3838         void *headers_v;
3839         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3840         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3841         struct {
3842                 union {
3843                         __extension__
3844                         struct {
3845                                 uint16_t version:3;
3846                                 uint16_t rsvd0:9;
3847                                 uint16_t s_present:1;
3848                                 uint16_t k_present:1;
3849                                 uint16_t rsvd_bit1:1;
3850                                 uint16_t c_present:1;
3851                         };
3852                         uint16_t value;
3853                 };
3854         } gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;
3855
3856         if (inner) {
3857                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3858                                          inner_headers);
3859                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3860         } else {
3861                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3862                                          outer_headers);
3863                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3864         }
3865         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3866         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
3867         if (!gre_v)
3868                 return;
3869         if (!gre_m)
3870                 gre_m = &rte_flow_item_gre_mask;
3871         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
3872                  rte_be_to_cpu_16(gre_m->protocol));
3873         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3874                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
3875         gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
3876         gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
3877         MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
3878                  gre_crks_rsvd0_ver_m.c_present);
3879         MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
3880                  gre_crks_rsvd0_ver_v.c_present &
3881                  gre_crks_rsvd0_ver_m.c_present);
3882         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
3883                  gre_crks_rsvd0_ver_m.k_present);
3884         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
3885                  gre_crks_rsvd0_ver_v.k_present &
3886                  gre_crks_rsvd0_ver_m.k_present);
3887         MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
3888                  gre_crks_rsvd0_ver_m.s_present);
3889         MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
3890                  gre_crks_rsvd0_ver_v.s_present &
3891                  gre_crks_rsvd0_ver_m.s_present);
3892 }
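
/*
 * The local bitfield above mirrors the GRE c_rsvd0_ver word after
 * rte_be_to_cpu_16(), assuming LSB-first bitfield allocation (the
 * common arrangement on little-endian targets): version occupies bits
 * 0-2 and the C flag bit 15, matching the RFC 2784/2890 field order.
 */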
3893
3894 /**
3895  * Add NVGRE item to matcher and to the value.
3896  *
3897  * @param[in, out] matcher
3898  *   Flow matcher.
3899  * @param[in, out] key
3900  *   Flow matcher value.
3901  * @param[in] item
3902  *   Flow pattern to translate.
3903  * @param[in] inner
3904  *   Item is inner pattern.
3905  */
3906 static void
3907 flow_dv_translate_item_nvgre(void *matcher, void *key,
3908                              const struct rte_flow_item *item,
3909                              int inner)
3910 {
3911         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3912         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3913         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3914         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3915         const char *tni_flow_id_m;
3916         const char *tni_flow_id_v;
3917         char *gre_key_m;
3918         char *gre_key_v;
3919         int size;
3920         int i;
3921
3922         flow_dv_translate_item_gre(matcher, key, item, inner);
3923         if (!nvgre_v)
3924                 return;
3925         if (!nvgre_m)
3926                 nvgre_m = &rte_flow_item_nvgre_mask;
        /* Access spec/mask only after the NULL checks above. */
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
3927         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3928         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3929         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3930         memcpy(gre_key_m, tni_flow_id_m, size);
3931         for (i = 0; i < size; ++i)
3932                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3933 }
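
/*
 * NVGRE reuses the GRE key matching fields: the 24-bit TNI and the
 * 8-bit flow ID together span the same 32 bits, so the 4 bytes are
 * copied starting at gre_key_h and spill over into the adjacent
 * gre_key_l field.
 */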
3934
3935 /**
3936  * Add VXLAN item to matcher and to the value.
3937  *
3938  * @param[in, out] matcher
3939  *   Flow matcher.
3940  * @param[in, out] key
3941  *   Flow matcher value.
3942  * @param[in] item
3943  *   Flow pattern to translate.
3944  * @param[in] inner
3945  *   Item is inner pattern.
3946  */
3947 static void
3948 flow_dv_translate_item_vxlan(void *matcher, void *key,
3949                              const struct rte_flow_item *item,
3950                              int inner)
3951 {
3952         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3953         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3954         void *headers_m;
3955         void *headers_v;
3956         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3957         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3958         char *vni_m;
3959         char *vni_v;
3960         uint16_t dport;
3961         int size;
3962         int i;
3963
3964         if (inner) {
3965                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3966                                          inner_headers);
3967                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3968         } else {
3969                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3970                                          outer_headers);
3971                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3972         }
3973         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3974                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3975         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3976                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3977                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3978         }
3979         if (!vxlan_v)
3980                 return;
3981         if (!vxlan_m)
3982                 vxlan_m = &rte_flow_item_vxlan_mask;
3983         size = sizeof(vxlan_m->vni);
3984         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3985         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3986         memcpy(vni_m, vxlan_m->vni, size);
3987         for (i = 0; i < size; ++i)
3988                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3989 }
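
/*
 * If the pattern did not already constrain the outer UDP destination
 * port, the translation above pins it to the default VXLAN (4789) or
 * VXLAN-GPE (4790) port, which is what MLX5_UDP_PORT_VXLAN and
 * MLX5_UDP_PORT_VXLAN_GPE stand for.
 */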
3990
3991 /**
3992  * Add MPLS item to matcher and to the value.
3993  *
3994  * @param[in, out] matcher
3995  *   Flow matcher.
3996  * @param[in, out] key
3997  *   Flow matcher value.
3998  * @param[in] item
3999  *   Flow pattern to translate.
4000  * @param[in] prev_layer
4001  *   The protocol layer indicated in previous item.
4002  * @param[in] inner
4003  *   Item is inner pattern.
4004  */
4005 static void
4006 flow_dv_translate_item_mpls(void *matcher, void *key,
4007                             const struct rte_flow_item *item,
4008                             uint64_t prev_layer,
4009                             int inner)
4010 {
4011         const uint32_t *in_mpls_m = item->mask;
4012         const uint32_t *in_mpls_v = item->spec;
4013         uint32_t *out_mpls_m = NULL;
4014         uint32_t *out_mpls_v = NULL;
4015         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4016         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4017         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
4018                                      misc_parameters_2);
4019         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4020         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
4021         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4022
4023         switch (prev_layer) {
4024         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4025                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
4026                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
4027                          MLX5_UDP_PORT_MPLS);
4028                 break;
4029         case MLX5_FLOW_LAYER_GRE:
4030                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
4031                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
4032                          RTE_ETHER_TYPE_MPLS);
4033                 break;
4034         default:
4035                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
4036                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
4037                          IPPROTO_MPLS);
4038                 break;
4039         }
4040         if (!in_mpls_v)
4041                 return;
4042         if (!in_mpls_m)
4043                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
4044         switch (prev_layer) {
4045         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
4046                 out_mpls_m =
4047                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4048                                                  outer_first_mpls_over_udp);
4049                 out_mpls_v =
4050                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4051                                                  outer_first_mpls_over_udp);
4052                 break;
4053         case MLX5_FLOW_LAYER_GRE:
4054                 out_mpls_m =
4055                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
4056                                                  outer_first_mpls_over_gre);
4057                 out_mpls_v =
4058                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
4059                                                  outer_first_mpls_over_gre);
4060                 break;
4061         default:
4062                 /* Inner MPLS not over GRE is not supported. */
4063                 if (!inner) {
4064                         out_mpls_m =
4065                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4066                                                          misc2_m,
4067                                                          outer_first_mpls);
4068                         out_mpls_v =
4069                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
4070                                                          misc2_v,
4071                                                          outer_first_mpls);
4072                 }
4073                 break;
4074         }
4075         if (out_mpls_m && out_mpls_v) {
4076                 *out_mpls_m = *in_mpls_m;
4077                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
4078         }
4079 }
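
/*
 * prev_layer selects both the disambiguating match and the MPLS field:
 * over UDP the well-known MPLS port (MLX5_UDP_PORT_MPLS, 6635) is
 * pinned and outer_first_mpls_over_udp is used; over GRE the GRE
 * protocol is pinned to RTE_ETHER_TYPE_MPLS and
 * outer_first_mpls_over_gre is used; otherwise ip_protocol is pinned to
 * IPPROTO_MPLS (MPLS-in-IP) and only the plain outer_first_mpls field
 * is available, which is why the inner case is skipped there.
 */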
4080
4081 /**
4082  * Add META item to matcher and to the value.
4083  *
4084  * @param[in, out] matcher
4085  *   Flow matcher.
4086  * @param[in, out] key
4087  *   Flow matcher value.
4088  * @param[in] item
4089  *   Flow pattern to translate.
4092  */
4093 static void
4094 flow_dv_translate_item_meta(void *matcher, void *key,
4095                             const struct rte_flow_item *item)
4096 {
4097         const struct rte_flow_item_meta *meta_m;
4098         const struct rte_flow_item_meta *meta_v;
4099         void *misc2_m =
4100                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
4101         void *misc2_v =
4102                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
4103
4104         meta_m = (const void *)item->mask;
4105         if (!meta_m)
4106                 meta_m = &rte_flow_item_meta_mask;
4107         meta_v = (const void *)item->spec;
4108         if (meta_v) {
4109                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
4110                          rte_be_to_cpu_32(meta_m->data));
4111                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
4112                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
4113         }
4114 }
4115
4116 /**
4117  * Add source vport match to the specified matcher.
4118  *
4119  * @param[in, out] matcher
4120  *   Flow matcher.
4121  * @param[in, out] key
4122  *   Flow matcher value.
4123  * @param[in] port
4124  *   Source vport value to match.
4125  * @param[in] mask
4126  *   Mask to apply on the source vport value.
4127  */
4128 static void
4129 flow_dv_translate_item_source_vport(void *matcher, void *key,
4130                                     int16_t port, uint16_t mask)
4131 {
4132         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
4133         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
4134
4135         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
4136         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
4137 }
4138
4139 /**
4140  * Translate port-id item to eswitch match on port-id.
4141  *
4142  * @param[in] dev
4143  *   The device to configure through.
4144  * @param[in, out] matcher
4145  *   Flow matcher.
4146  * @param[in, out] key
4147  *   Flow matcher value.
4148  * @param[in] item
4149  *   Flow pattern to translate.
4150  *
4151  * @return
4152  *   0 on success, a negative errno value otherwise.
4153  */
4154 static int
4155 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
4156                                void *key, const struct rte_flow_item *item)
4157 {
4158         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
4159         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
4160         uint16_t mask, val, id;
4161         int ret;
4162
4163         mask = pid_m ? pid_m->id : 0xffff;
4164         id = pid_v ? pid_v->id : dev->data->port_id;
4165         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
4166         if (ret)
4167                 return ret;
4168         flow_dv_translate_item_source_vport(matcher, key, val, mask);
4169         return 0;
4170 }
4171
4172 /**
4173  * Add ICMP6 item to matcher and to the value.
4174  *
4175  * @param[in, out] matcher
4176  *   Flow matcher.
4177  * @param[in, out] key
4178  *   Flow matcher value.
4179  * @param[in] item
4180  *   Flow pattern to translate.
4181  * @param[in] inner
4182  *   Item is inner pattern.
4183  */
4184 static void
4185 flow_dv_translate_item_icmp6(void *matcher, void *key,
4186                               const struct rte_flow_item *item,
4187                               int inner)
4188 {
4189         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
4190         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
4191         void *headers_m;
4192         void *headers_v;
4193         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4194                                      misc_parameters_3);
4195         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4196         if (inner) {
4197                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4198                                          inner_headers);
4199                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4200         } else {
4201                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4202                                          outer_headers);
4203                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4204         }
4205         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4206         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
4207         if (!icmp6_v)
4208                 return;
4209         if (!icmp6_m)
4210                 icmp6_m = &rte_flow_item_icmp6_mask;
4211         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
4212         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
4213                  icmp6_v->type & icmp6_m->type);
4214         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
4215         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
4216                  icmp6_v->code & icmp6_m->code);
4217 }
4218
4219 /**
4220  * Add ICMP item to matcher and to the value.
4221  *
4222  * @param[in, out] matcher
4223  *   Flow matcher.
4224  * @param[in, out] key
4225  *   Flow matcher value.
4226  * @param[in] item
4227  *   Flow pattern to translate.
4228  * @param[in] inner
4229  *   Item is inner pattern.
4230  */
4231 static void
4232 flow_dv_translate_item_icmp(void *matcher, void *key,
4233                             const struct rte_flow_item *item,
4234                             int inner)
4235 {
4236         const struct rte_flow_item_icmp *icmp_m = item->mask;
4237         const struct rte_flow_item_icmp *icmp_v = item->spec;
4238         void *headers_m;
4239         void *headers_v;
4240         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
4241                                      misc_parameters_3);
4242         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
4243         if (inner) {
4244                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4245                                          inner_headers);
4246                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
4247         } else {
4248                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
4249                                          outer_headers);
4250                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
4251         }
4252         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
4253         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
4254         if (!icmp_v)
4255                 return;
4256         if (!icmp_m)
4257                 icmp_m = &rte_flow_item_icmp_mask;
4258         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
4259                  icmp_m->hdr.icmp_type);
4260         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
4261                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
4262         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
4263                  icmp_m->hdr.icmp_code);
4264         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
4265                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
4266 }
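
/*
 * Both ICMP translations write their type/code into misc_parameters_3;
 * note that flow_dv_matcher_enable() below only advertises the MISC3
 * criteria block when built with HAVE_MLX5DV_DR.
 */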
4267
4268 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
4269
4270 #define HEADER_IS_ZERO(match_criteria, headers)                              \
4271         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
4272                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
4273
4274 /**
4275  * Calculate flow matcher enable bitmap.
4276  *
4277  * @param match_criteria
4278  *   Pointer to flow matcher criteria.
4279  *
4280  * @return
4281  *   Bitmap of enabled fields.
4282  */
4283 static uint8_t
4284 flow_dv_matcher_enable(uint32_t *match_criteria)
4285 {
4286         uint8_t match_criteria_enable;
4287
4288         match_criteria_enable =
4289                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
4290                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
4291         match_criteria_enable |=
4292                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
4293                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
4294         match_criteria_enable |=
4295                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
4296                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
4297         match_criteria_enable |=
4298                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
4299                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
4300 #ifdef HAVE_MLX5DV_DR
4301         match_criteria_enable |=
4302                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
4303                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
4304 #endif
4305         return match_criteria_enable;
4306 }
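
/*
 * Example: a matcher that populated only outer_headers and
 * misc_parameters_2 yields
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT) |
 * (1 << MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT),
 * telling the device which match-parameter blocks carry meaningful
 * masks.
 */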
4307
4308
4309 /**
4310  * Get a flow table.
4311  *
4312  * @param[in, out] dev
4313  *   Pointer to rte_eth_dev structure.
4314  * @param[in] table_id
4315  *   Table id to use.
4316  * @param[in] egress
4317  *   Direction of the table.
4318  * @param[in] transfer
4319  *   E-Switch or NIC flow.
4320  * @param[out] error
4321  *   Pointer to error structure.
4322  *
4323  * @return
4324  *   Returns the table resource based on the index, NULL in case of failure.
4325  */
4326 static struct mlx5_flow_tbl_resource *
4327 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
4328                          uint32_t table_id, uint8_t egress,
4329                          uint8_t transfer,
4330                          struct rte_flow_error *error)
4331 {
4332         struct mlx5_priv *priv = dev->data->dev_private;
4333         struct mlx5_ibv_shared *sh = priv->sh;
4334         struct mlx5_flow_tbl_resource *tbl;
4335
4336 #ifdef HAVE_MLX5DV_DR
4337         if (transfer) {
4338                 tbl = &sh->fdb_tbl[table_id];
4339                 if (!tbl->obj)
4340                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4341                                 (sh->fdb_domain, table_id);
4342         } else if (egress) {
4343                 tbl = &sh->tx_tbl[table_id];
4344                 if (!tbl->obj)
4345                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4346                                 (sh->tx_domain, table_id);
4347         } else {
4348                 tbl = &sh->rx_tbl[table_id];
4349                 if (!tbl->obj)
4350                         tbl->obj = mlx5_glue->dr_create_flow_tbl
4351                                 (sh->rx_domain, table_id);
4352         }
4353         if (!tbl->obj) {
4354                 rte_flow_error_set(error, ENOMEM,
4355                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4356                                    NULL, "cannot create table");
4357                 return NULL;
4358         }
4359         rte_atomic32_inc(&tbl->refcnt);
4360         return tbl;
4361 #else
4362         (void)error;
4363         (void)tbl;
4364         if (transfer)
4365                 return &sh->fdb_tbl[table_id];
4366         else if (egress)
4367                 return &sh->tx_tbl[table_id];
4368         else
4369                 return &sh->rx_tbl[table_id];
4370 #endif
4371 }
4372
4373 /**
4374  * Release a flow table.
4375  *
4376  * @param[in] tbl
4377  *   Table resource to be released.
4378  *
4379  * @return
4380  *   Returns 0 if the table was released, 1 otherwise.
4381  */
4382 static int
4383 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
4384 {
4385         if (!tbl)
4386                 return 0;
4387         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
4388                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
4389                 tbl->obj = NULL;
4390                 return 0;
4391         }
4392         return 1;
4393 }
4394
4395 /**
4396  * Register the flow matcher.
4397  *
4398  * @param[in, out] dev
4399  *   Pointer to rte_eth_dev structure.
4400  * @param[in, out] matcher
4401  *   Pointer to flow matcher.
4402  * @param[in, out] dev_flow
4403  *   Pointer to the dev_flow.
4404  * @param[out] error
4405  *   Pointer to error structure.
4406  *
4407  * @return
4408  *   0 on success, a negative errno value otherwise and errno is set.
4409  */
4410 static int
4411 flow_dv_matcher_register(struct rte_eth_dev *dev,
4412                          struct mlx5_flow_dv_matcher *matcher,
4413                          struct mlx5_flow *dev_flow,
4414                          struct rte_flow_error *error)
4415 {
4416         struct mlx5_priv *priv = dev->data->dev_private;
4417         struct mlx5_ibv_shared *sh = priv->sh;
4418         struct mlx5_flow_dv_matcher *cache_matcher;
4419         struct mlx5dv_flow_matcher_attr dv_attr = {
4420                 .type = IBV_FLOW_ATTR_NORMAL,
4421                 .match_mask = (void *)&matcher->mask,
4422         };
4423         struct mlx5_flow_tbl_resource *tbl = NULL;
4424
4425         /* Lookup from cache. */
4426         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
4427                 if (matcher->crc == cache_matcher->crc &&
4428                     matcher->priority == cache_matcher->priority &&
4429                     matcher->egress == cache_matcher->egress &&
4430                     matcher->group == cache_matcher->group &&
4431                     matcher->transfer == cache_matcher->transfer &&
4432                     !memcmp((const void *)matcher->mask.buf,
4433                             (const void *)cache_matcher->mask.buf,
4434                             cache_matcher->mask.size)) {
4435                         DRV_LOG(DEBUG,
4436                                 "priority %hd use %s matcher %p: refcnt %d++",
4437                                 cache_matcher->priority,
4438                                 cache_matcher->egress ? "tx" : "rx",
4439                                 (void *)cache_matcher,
4440                                 rte_atomic32_read(&cache_matcher->refcnt));
4441                         rte_atomic32_inc(&cache_matcher->refcnt);
4442                         dev_flow->dv.matcher = cache_matcher;
4443                         return 0;
4444                 }
4445         }
4446         /* Register new matcher. */
4447         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
4448         if (!cache_matcher)
4449                 return rte_flow_error_set(error, ENOMEM,
4450                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4451                                           "cannot allocate matcher memory");
4452         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
4453                                        matcher->egress, matcher->transfer,
4454                                        error);
4455         if (!tbl) {
4456                 rte_free(cache_matcher);
4457                 return rte_flow_error_set(error, ENOMEM,
4458                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4459                                           NULL, "cannot create table");
4460         }
4461         *cache_matcher = *matcher;
4462         dv_attr.match_criteria_enable =
4463                 flow_dv_matcher_enable(cache_matcher->mask.buf);
4464         dv_attr.priority = matcher->priority;
4465         if (matcher->egress)
4466                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
4467         cache_matcher->matcher_object =
4468                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
4469         if (!cache_matcher->matcher_object) {
4470                 rte_free(cache_matcher);
4471 #ifdef HAVE_MLX5DV_DR
4472                 flow_dv_tbl_resource_release(tbl);
4473 #endif
4474                 return rte_flow_error_set(error, ENOMEM,
4475                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4476                                           NULL, "cannot create matcher");
4477         }
4478         rte_atomic32_inc(&cache_matcher->refcnt);
4479         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
4480         dev_flow->dv.matcher = cache_matcher;
4481         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
4482                 cache_matcher->priority,
4483                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
4484                 rte_atomic32_read(&cache_matcher->refcnt));
4485         rte_atomic32_inc(&tbl->refcnt);
4486         return 0;
4487 }
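
/*
 * The matcher cache above is keyed on (crc, priority, egress, group,
 * transfer) with a byte-wise mask compare as the authoritative test;
 * the CRC only serves as a cheap first-pass filter.
 */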
4488
4489 /**
4490  * Find existing tag resource or create and register a new one.
4491  *
4492  * @param[in, out] dev
4493  *   Pointer to rte_eth_dev structure.
4494  * @param[in, out] resource
4495  *   Pointer to tag resource.
4496  * @param[in, out] dev_flow
4497  *   Pointer to the dev_flow.
4498  * @param[out] error
4499  *   Pointer to error structure.
4500  *
4501  * @return
4502  *   0 on success, a negative errno value otherwise and errno is set.
4503  */
4504 static int
4505 flow_dv_tag_resource_register
4506                         (struct rte_eth_dev *dev,
4507                          struct mlx5_flow_dv_tag_resource *resource,
4508                          struct mlx5_flow *dev_flow,
4509                          struct rte_flow_error *error)
4510 {
4511         struct mlx5_priv *priv = dev->data->dev_private;
4512         struct mlx5_ibv_shared *sh = priv->sh;
4513         struct mlx5_flow_dv_tag_resource *cache_resource;
4514
4515         /* Lookup a matching resource from cache. */
4516         LIST_FOREACH(cache_resource, &sh->tags, next) {
4517                 if (resource->tag == cache_resource->tag) {
4518                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
4519                                 (void *)cache_resource,
4520                                 rte_atomic32_read(&cache_resource->refcnt));
4521                         rte_atomic32_inc(&cache_resource->refcnt);
4522                         dev_flow->flow->tag_resource = cache_resource;
4523                         return 0;
4524                 }
4525         }
4526         /* Register new resource. */
4527         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
4528         if (!cache_resource)
4529                 return rte_flow_error_set(error, ENOMEM,
4530                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4531                                           "cannot allocate resource memory");
4532         *cache_resource = *resource;
4533         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
4534                 (resource->tag);
4535         if (!cache_resource->action) {
4536                 rte_free(cache_resource);
4537                 return rte_flow_error_set(error, ENOMEM,
4538                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4539                                           NULL, "cannot create action");
4540         }
4541         rte_atomic32_init(&cache_resource->refcnt);
4542         rte_atomic32_inc(&cache_resource->refcnt);
4543         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
4544         dev_flow->flow->tag_resource = cache_resource;
4545         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
4546                 (void *)cache_resource,
4547                 rte_atomic32_read(&cache_resource->refcnt));
4548         return 0;
4549 }
4550
4551 /**
4552  * Release the tag.
4553  *
4554  * @param dev
4555  *   Pointer to Ethernet device.
4556  * @param tag
4557  *   Pointer to the tag resource.
4558  *
4559  * @return
4560  *   1 while a reference on it exists, 0 when freed.
4561  */
4562 static int
4563 flow_dv_tag_release(struct rte_eth_dev *dev,
4564                     struct mlx5_flow_dv_tag_resource *tag)
4565 {
4566         assert(tag);
4567         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4568                 dev->data->port_id, (void *)tag,
4569                 rte_atomic32_read(&tag->refcnt));
4570         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4571                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4572                 LIST_REMOVE(tag, next);
4573                 DRV_LOG(DEBUG, "port %u tag %p: removed",
4574                         dev->data->port_id, (void *)tag);
4575                 rte_free(tag);
4576                 return 0;
4577         }
4578         return 1;
4579 }
4580
4581 /**
4582  * Translate port ID action to vport.
4583  *
4584  * @param[in] dev
4585  *   Pointer to rte_eth_dev structure.
4586  * @param[in] action
4587  *   Pointer to the port ID action.
4588  * @param[out] dst_port_id
4589  *   The target port ID.
4590  * @param[out] error
4591  *   Pointer to the error structure.
4592  *
4593  * @return
4594  *   0 on success, a negative errno value otherwise and rte_errno is set.
4595  */
4596 static int
4597 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4598                                  const struct rte_flow_action *action,
4599                                  uint32_t *dst_port_id,
4600                                  struct rte_flow_error *error)
4601 {
4602         uint32_t port;
4603         uint16_t port_id;
4604         int ret;
4605         const struct rte_flow_action_port_id *conf =
4606                         (const struct rte_flow_action_port_id *)action->conf;
4607
4608         port = conf->original ? dev->data->port_id : conf->id;
4609         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4610         if (ret)
4611                 return rte_flow_error_set(error, -ret,
4612                                           RTE_FLOW_ERROR_TYPE_ACTION,
4613                                           NULL,
4614                                           "No eswitch info was found for port");
4615         *dst_port_id = port_id;
4616         return 0;
4617 }
4618
4619 /**
4620  * Fill the flow with DV spec.
4621  *
4622  * @param[in] dev
4623  *   Pointer to rte_eth_dev structure.
4624  * @param[in, out] dev_flow
4625  *   Pointer to the sub flow.
4626  * @param[in] attr
4627  *   Pointer to the flow attributes.
4628  * @param[in] items
4629  *   Pointer to the list of items.
4630  * @param[in] actions
4631  *   Pointer to the list of actions.
4632  * @param[out] error
4633  *   Pointer to the error structure.
4634  *
4635  * @return
4636  *   0 on success, a negative errno value otherwise and rte_errno is set.
4637  */
4638 static int
4639 flow_dv_translate(struct rte_eth_dev *dev,
4640                   struct mlx5_flow *dev_flow,
4641                   const struct rte_flow_attr *attr,
4642                   const struct rte_flow_item items[],
4643                   const struct rte_flow_action actions[],
4644                   struct rte_flow_error *error)
4645 {
4646         struct mlx5_priv *priv = dev->data->dev_private;
4647         struct rte_flow *flow = dev_flow->flow;
4648         uint64_t item_flags = 0;
4649         uint64_t last_item = 0;
4650         uint64_t action_flags = 0;
4651         uint64_t priority = attr->priority;
4652         struct mlx5_flow_dv_matcher matcher = {
4653                 .mask = {
4654                         .size = sizeof(matcher.mask.buf),
4655                 },
4656         };
4657         int actions_n = 0;
4658         bool actions_end = false;
4659         struct mlx5_flow_dv_modify_hdr_resource res = {
4660                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4661                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4662         };
4663         union flow_dv_attr flow_attr = { .attr = 0 };
4664         struct mlx5_flow_dv_tag_resource tag_resource;
4665         uint32_t modify_action_position = UINT32_MAX;
4666         void *match_mask = matcher.mask.buf;
4667         void *match_value = dev_flow->dv.value.buf;
4668
4669         flow->group = attr->group;
4670         if (attr->transfer)
4671                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4672         if (priority == MLX5_FLOW_PRIO_RSVD)
4673                 priority = priv->config.flow_prio - 1;
4674         for (; !actions_end; actions++) {
4675                 const struct rte_flow_action_queue *queue;
4676                 const struct rte_flow_action_rss *rss;
4677                 const struct rte_flow_action *action = actions;
4678                 const struct rte_flow_action_count *count = action->conf;
4679                 const uint8_t *rss_key;
4680                 const struct rte_flow_action_jump *jump_data;
4681                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4682                 struct mlx5_flow_tbl_resource *tbl;
4683                 uint32_t port_id = 0;
4684                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4685
4686                 switch (actions->type) {
4687                 case RTE_FLOW_ACTION_TYPE_VOID:
4688                         break;
4689                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4690                         if (flow_dv_translate_action_port_id(dev, action,
4691                                                              &port_id, error))
4692                                 return -rte_errno;
4693                         port_id_resource.port_id = port_id;
4694                         if (flow_dv_port_id_action_resource_register
4695                             (dev, &port_id_resource, dev_flow, error))
4696                                 return -rte_errno;
4697                         dev_flow->dv.actions[actions_n++] =
4698                                 dev_flow->dv.port_id_action->action;
4699                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4700                         break;
4701                 case RTE_FLOW_ACTION_TYPE_FLAG:
4702                         tag_resource.tag =
4703                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4704                         if (!flow->tag_resource)
4705                                 if (flow_dv_tag_resource_register
4706                                     (dev, &tag_resource, dev_flow, error))
4707                                         return -rte_errno;
4708                         dev_flow->dv.actions[actions_n++] =
4709                                 flow->tag_resource->action;
4710                         action_flags |= MLX5_FLOW_ACTION_FLAG;
4711                         break;
4712                 case RTE_FLOW_ACTION_TYPE_MARK:
4713                         tag_resource.tag = mlx5_flow_mark_set
4714                               (((const struct rte_flow_action_mark *)
4715                                (actions->conf))->id);
4716                         if (!flow->tag_resource)
4717                                 if (flow_dv_tag_resource_register
4718                                     (dev, &tag_resource, dev_flow, error))
4719                                         return -rte_errno;
4720                         dev_flow->dv.actions[actions_n++] =
4721                                 flow->tag_resource->action;
4722                         action_flags |= MLX5_FLOW_ACTION_MARK;
4723                         break;
4724                 case RTE_FLOW_ACTION_TYPE_DROP:
4725                         action_flags |= MLX5_FLOW_ACTION_DROP;
4726                         break;
4727                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4728                         queue = actions->conf;
4729                         flow->rss.queue_num = 1;
4730                         (*flow->queue)[0] = queue->index;
4731                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4732                         break;
4733                 case RTE_FLOW_ACTION_TYPE_RSS:
4734                         rss = actions->conf;
4735                         if (flow->queue)
4736                                 memcpy((*flow->queue), rss->queue,
4737                                        rss->queue_num * sizeof(uint16_t));
4738                         flow->rss.queue_num = rss->queue_num;
4739                         /* NULL RSS key indicates default RSS key. */
4740                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
4741                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4742                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4743                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4744                         flow->rss.level = rss->level;
4745                         action_flags |= MLX5_FLOW_ACTION_RSS;
4746                         break;
4747                 case RTE_FLOW_ACTION_TYPE_COUNT:
4748                         if (!priv->config.devx) {
4749                                 rte_errno = ENOTSUP;
4750                                 goto cnt_err;
4751                         }
4752                         flow->counter = flow_dv_counter_alloc(dev,
4753                                                               count->shared,
4754                                                               count->id,
4755                                                               attr->group);
4756                         if (flow->counter == NULL)
4757                                 goto cnt_err;
4758                         dev_flow->dv.actions[actions_n++] =
4759                                 flow->counter->action;
4760                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4761                         break;
4762 cnt_err:
4763                         if (rte_errno == ENOTSUP)
4764                                 return rte_flow_error_set
4765                                               (error, ENOTSUP,
4766                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4767                                                NULL,
4768                                                "count action not supported");
4769                         else
4770                                 return rte_flow_error_set
4771                                                 (error, rte_errno,
4772                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4773                                                  action,
4774                                                  "cannot create counter"
4775                                                   " object.");
4776                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4777                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4778                         if (flow_dv_create_action_l2_encap(dev, actions,
4779                                                            dev_flow,
4780                                                            attr->transfer,
4781                                                            error))
4782                                 return -rte_errno;
4783                         dev_flow->dv.actions[actions_n++] =
4784                                 dev_flow->dv.encap_decap->verbs_action;
4785                         action_flags |= actions->type ==
4786                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4787                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
4788                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
4789                         break;
4790                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4791                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4792                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
4793                                                            attr->transfer,
4794                                                            error))
4795                                 return -rte_errno;
4796                         dev_flow->dv.actions[actions_n++] =
4797                                 dev_flow->dv.encap_decap->verbs_action;
4798                         action_flags |= actions->type ==
4799                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4800                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
4801                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
4802                         break;
4803                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4804                         /* Handle encap with preceding decap. */
4805                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4806                                 if (flow_dv_create_action_raw_encap
4807                                         (dev, actions, dev_flow, attr, error))
4808                                         return -rte_errno;
4809                                 dev_flow->dv.actions[actions_n++] =
4810                                         dev_flow->dv.encap_decap->verbs_action;
4811                         } else {
4812                                 /* Handle encap without preceding decap. */
4813                                 if (flow_dv_create_action_l2_encap
4814                                     (dev, actions, dev_flow, attr->transfer,
4815                                      error))
4816                                         return -rte_errno;
4817                                 dev_flow->dv.actions[actions_n++] =
4818                                         dev_flow->dv.encap_decap->verbs_action;
4819                         }
4820                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4821                         break;
4822                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4823                         /* Check if this decap is followed by encap. */
4824                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4825                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4826                                action++) {
4827                         }
4828                         /* Handle decap only if it isn't followed by encap. */
4829                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4830                                 if (flow_dv_create_action_l2_decap
4831                                     (dev, dev_flow, attr->transfer, error))
4832                                         return -rte_errno;
4833                                 dev_flow->dv.actions[actions_n++] =
4834                                         dev_flow->dv.encap_decap->verbs_action;
4835                         }
4836                         /* If decap is followed by encap, handle it at encap. */
4837                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4838                         break;
4839                 case RTE_FLOW_ACTION_TYPE_JUMP:
4840                         jump_data = action->conf;
4841                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4842                                                        MLX5_GROUP_FACTOR,
4843                                                        attr->egress,
4844                                                        attr->transfer, error);
4845                         if (!tbl)
4846                                 return rte_flow_error_set
4847                                                 (error, errno,
4848                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4849                                                  NULL,
4850                                                  "cannot create jump action.");
4851                         jump_tbl_resource.tbl = tbl;
4852                         if (flow_dv_jump_tbl_resource_register
4853                             (dev, &jump_tbl_resource, dev_flow, error)) {
4854                                 flow_dv_tbl_resource_release(tbl);
4855                                 return rte_flow_error_set
4856                                                 (error, errno,
4857                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4858                                                  NULL,
4859                                                  "cannot create jump action.");
4860                         }
4861                         dev_flow->dv.actions[actions_n++] =
4862                                 dev_flow->dv.jump->action;
4863                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4864                         break;
4865                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4866                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4867                         if (flow_dv_convert_action_modify_mac(&res, actions,
4868                                                               error))
4869                                 return -rte_errno;
4870                         action_flags |= actions->type ==
4871                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4872                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
4873                                         MLX5_FLOW_ACTION_SET_MAC_DST;
4874                         break;
4875                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4876                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4877                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
4878                                                                error))
4879                                 return -rte_errno;
4880                         action_flags |= actions->type ==
4881                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4882                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
4883                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
4884                         break;
4885                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4886                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4887                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
4888                                                                error))
4889                                 return -rte_errno;
4890                         action_flags |= actions->type ==
4891                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4892                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
4893                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
4894                         break;
4895                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4896                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4897                         if (flow_dv_convert_action_modify_tp(&res, actions,
4898                                                              items, &flow_attr,
4899                                                              error))
4900                                 return -rte_errno;
4901                         action_flags |= actions->type ==
4902                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4903                                         MLX5_FLOW_ACTION_SET_TP_SRC :
4904                                         MLX5_FLOW_ACTION_SET_TP_DST;
4905                         break;
4906                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4907                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4908                                                                   &flow_attr,
4909                                                                   error))
4910                                 return -rte_errno;
4911                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4912                         break;
4913                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4914                         if (flow_dv_convert_action_modify_ttl(&res, actions,
4915                                                              items, &flow_attr,
4916                                                              error))
4917                                 return -rte_errno;
4918                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4919                         break;
4920                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4921                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4922                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4923                                                                   error))
4924                                 return -rte_errno;
4925                         action_flags |= actions->type ==
4926                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4927                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
4928                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4929                         break;
4931                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4932                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4933                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4934                                                                   error))
4935                                 return -rte_errno;
4936                         action_flags |= actions->type ==
4937                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4938                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
4939                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
4940                         break;
4941                 case RTE_FLOW_ACTION_TYPE_END:
4942                         actions_end = true;
4943                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4944                                 /* Create modify-header action if needed. */
4945                                 if (flow_dv_modify_hdr_resource_register
4946                                                                 (dev, &res,
4947                                                                  dev_flow,
4948                                                                  error))
4949                                         return -rte_errno;
4950                                 dev_flow->dv.actions[modify_action_position] =
4951                                         dev_flow->dv.modify_hdr->verbs_action;
4952                         }
4953                         break;
4954                 default:
4955                         break;
4956                 }
4957                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4958                     modify_action_position == UINT32_MAX)
4959                         modify_action_position = actions_n++;
4960         }
4961         dev_flow->dv.actions_n = actions_n;
4962         flow->actions = action_flags;
4963         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4964                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4965
4966                 switch (items->type) {
4967                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4968                         flow_dv_translate_item_port_id(dev, match_mask,
4969                                                        match_value, items);
4970                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4971                         break;
4972                 case RTE_FLOW_ITEM_TYPE_ETH:
4973                         flow_dv_translate_item_eth(match_mask, match_value,
4974                                                    items, tunnel);
4975                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4976                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4977                                              MLX5_FLOW_LAYER_OUTER_L2;
4978                         break;
4979                 case RTE_FLOW_ITEM_TYPE_VLAN:
4980                         flow_dv_translate_item_vlan(match_mask, match_value,
4981                                                     items, tunnel);
4982                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4983                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4984                                               MLX5_FLOW_LAYER_INNER_VLAN) :
4985                                              (MLX5_FLOW_LAYER_OUTER_L2 |
4986                                               MLX5_FLOW_LAYER_OUTER_VLAN);
4987                         break;
4988                 case RTE_FLOW_ITEM_TYPE_IPV4:
4989                         flow_dv_translate_item_ipv4(match_mask, match_value,
4990                                                     items, tunnel, attr->group);
4991                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4992                         dev_flow->dv.hash_fields |=
4993                                 mlx5_flow_hashfields_adjust
4994                                         (dev_flow, tunnel,
4995                                          MLX5_IPV4_LAYER_TYPES,
4996                                          MLX5_IPV4_IBV_RX_HASH);
4997                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4998                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4999                         mlx5_flow_tunnel_ip_check(items, &last_item);
5000                         break;
5001                 case RTE_FLOW_ITEM_TYPE_IPV6:
5002                         flow_dv_translate_item_ipv6(match_mask, match_value,
5003                                                     items, tunnel, attr->group);
5004                         matcher.priority = MLX5_PRIORITY_MAP_L3;
5005                         dev_flow->dv.hash_fields |=
5006                                 mlx5_flow_hashfields_adjust
5007                                         (dev_flow, tunnel,
5008                                          MLX5_IPV6_LAYER_TYPES,
5009                                          MLX5_IPV6_IBV_RX_HASH);
5010                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
5011                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
5012                         mlx5_flow_tunnel_ip_check(items, &last_item);
5013                         break;
5014                 case RTE_FLOW_ITEM_TYPE_TCP:
5015                         flow_dv_translate_item_tcp(match_mask, match_value,
5016                                                    items, tunnel);
5017                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5018                         dev_flow->dv.hash_fields |=
5019                                 mlx5_flow_hashfields_adjust
5020                                         (dev_flow, tunnel, ETH_RSS_TCP,
5021                                          IBV_RX_HASH_SRC_PORT_TCP |
5022                                          IBV_RX_HASH_DST_PORT_TCP);
5023                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
5024                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
5025                         break;
5026                 case RTE_FLOW_ITEM_TYPE_UDP:
5027                         flow_dv_translate_item_udp(match_mask, match_value,
5028                                                    items, tunnel);
5029                         matcher.priority = MLX5_PRIORITY_MAP_L4;
5030                         dev_flow->dv.hash_fields |=
5031                                 mlx5_flow_hashfields_adjust
5032                                         (dev_flow, tunnel, ETH_RSS_UDP,
5033                                          IBV_RX_HASH_SRC_PORT_UDP |
5034                                          IBV_RX_HASH_DST_PORT_UDP);
5035                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
5036                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
5037                         break;
5038                 case RTE_FLOW_ITEM_TYPE_GRE:
5039                         flow_dv_translate_item_gre(match_mask, match_value,
5040                                                    items, tunnel);
5041                         last_item = MLX5_FLOW_LAYER_GRE;
5042                         break;
5043                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
5044                         flow_dv_translate_item_gre_key(match_mask,
5045                                                        match_value, items);
5046                         last_item = MLX5_FLOW_LAYER_GRE_KEY;
5047                         break;
5048                 case RTE_FLOW_ITEM_TYPE_NVGRE:
5049                         flow_dv_translate_item_nvgre(match_mask, match_value,
5050                                                      items, tunnel);
5051                         last_item = MLX5_FLOW_LAYER_GRE;
5052                         break;
5053                 case RTE_FLOW_ITEM_TYPE_VXLAN:
5054                         flow_dv_translate_item_vxlan(match_mask, match_value,
5055                                                      items, tunnel);
5056                         last_item = MLX5_FLOW_LAYER_VXLAN;
5057                         break;
5058                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
5059                         flow_dv_translate_item_vxlan(match_mask, match_value,
5060                                                      items, tunnel);
5061                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
5062                         break;
5063                 case RTE_FLOW_ITEM_TYPE_MPLS:
5064                         flow_dv_translate_item_mpls(match_mask, match_value,
5065                                                     items, last_item, tunnel);
5066                         last_item = MLX5_FLOW_LAYER_MPLS;
5067                         break;
5068                 case RTE_FLOW_ITEM_TYPE_META:
5069                         flow_dv_translate_item_meta(match_mask, match_value,
5070                                                     items);
5071                         last_item = MLX5_FLOW_ITEM_METADATA;
5072                         break;
5073                 case RTE_FLOW_ITEM_TYPE_ICMP:
5074                         flow_dv_translate_item_icmp(match_mask, match_value,
5075                                                     items, tunnel);
5076                         last_item = MLX5_FLOW_LAYER_ICMP;
5077                         break;
5078                 case RTE_FLOW_ITEM_TYPE_ICMP6:
5079                         flow_dv_translate_item_icmp6(match_mask, match_value,
5080                                                       items, tunnel);
5081                         last_item = MLX5_FLOW_LAYER_ICMP6;
5082                         break;
5083                 default:
5084                         break;
5085                 }
5086                 item_flags |= last_item;
5087         }
5088         /*
5089          * For ingress traffic when E-Switch mode is enabled, there are
5090          * two cases where the source port must be set manually: a NIC
5091          * steering rule, and an E-Switch rule in which no port_id item
5092          * was found. In both cases the source port is set according to
5093          * the current port in use.
5094          */
5095         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
5096             (priv->representor || priv->master)) {
5097                 if (flow_dv_translate_item_port_id(dev, match_mask,
5098                                                    match_value, NULL))
5099                         return -rte_errno;
5100         }
5101         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
5102                                          dev_flow->dv.value.buf));
5103         dev_flow->layers = item_flags;
5104         /* Register matcher. */
5105         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
5106                                     matcher.mask.size);
5107         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
5108                                                      matcher.priority);
5109         matcher.egress = attr->egress;
5110         matcher.group = attr->group;
5111         matcher.transfer = attr->transfer;
5112         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
5113                 return -rte_errno;
5114         return 0;
5115 }
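
/*
 * Example (illustrative only, never compiled): a caller-side action list
 * that exercises the modify-header path of flow_dv_translate() above.
 * SET_IPV4_SRC and SET_TTL are both accumulated into the single
 * modify-header resource registered once RTE_FLOW_ACTION_TYPE_END is
 * reached. The guard macro, port id, address, queue and pattern are
 * placeholders, not part of the driver.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_modify_hdr(uint16_t port_id,
                           const struct rte_flow_attr *attr,
                           const struct rte_flow_item pattern[])
{
        struct rte_flow_action_set_ipv4 set_src = {
                .ipv4_addr = rte_cpu_to_be_32(RTE_IPV4(192, 168, 0, 1)),
        };
        struct rte_flow_action_set_ttl set_ttl = { .ttl_value = 64 };
        struct rte_flow_action_queue queue = { .index = 0 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
                  .conf = &set_src },
                { .type = RTE_FLOW_ACTION_TYPE_SET_TTL, .conf = &set_ttl },
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;

        /* Both header modifications above end up in one verbs action. */
        return rte_flow_create(port_id, attr, pattern, actions, &err) ?
               0 : -rte_errno;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */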
5116
5117 /**
5118  * Apply the flow to the NIC.
5119  *
5120  * @param[in] dev
5121  *   Pointer to the Ethernet device structure.
5122  * @param[in, out] flow
5123  *   Pointer to flow structure.
5124  * @param[out] error
5125  *   Pointer to error structure.
5126  *
5127  * @return
5128  *   0 on success, a negative errno value otherwise and rte_errno is set.
5129  */
5130 static int
5131 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
5132               struct rte_flow_error *error)
5133 {
5134         struct mlx5_flow_dv *dv;
5135         struct mlx5_flow *dev_flow;
5136         struct mlx5_priv *priv = dev->data->dev_private;
5137         int n;
5138         int err;
5139
5140         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5141                 dv = &dev_flow->dv;
5142                 n = dv->actions_n;
5143                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
5144                         if (flow->transfer) {
5145                                 dv->actions[n++] = priv->sh->esw_drop_action;
5146                         } else {
5147                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
5148                                 if (!dv->hrxq) {
5149                                         rte_flow_error_set
5150                                                 (error, errno,
5151                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5152                                                  NULL,
5153                                                  "cannot get drop hash queue");
5154                                         goto error;
5155                                 }
5156                                 dv->actions[n++] = dv->hrxq->action;
5157                         }
5158                 } else if (flow->actions &
5159                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
5160                         struct mlx5_hrxq *hrxq;
5161
5162                         hrxq = mlx5_hrxq_get(dev, flow->key,
5163                                              MLX5_RSS_HASH_KEY_LEN,
5164                                              dv->hash_fields,
5165                                              (*flow->queue),
5166                                              flow->rss.queue_num);
5167                         if (!hrxq) {
5168                                 int lro = 0;
5169
5170                                 if (mlx5_lro_on(dev)) {
5171                                         if ((dev_flow->layers &
5172                                              MLX5_FLOW_LAYER_IPV4_LRO)
5173                                             == MLX5_FLOW_LAYER_IPV4_LRO)
5174                                                 lro = MLX5_FLOW_IPV4_LRO;
5175                                         else if ((dev_flow->layers &
5176                                                   MLX5_FLOW_LAYER_IPV6_LRO)
5177                                                  == MLX5_FLOW_LAYER_IPV6_LRO)
5178                                                 lro = MLX5_FLOW_IPV6_LRO;
5179                                 }
5180                                 hrxq = mlx5_hrxq_new
5181                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
5182                                          dv->hash_fields, (*flow->queue),
5183                                          flow->rss.queue_num,
5184                                          !!(dev_flow->layers &
5185                                             MLX5_FLOW_LAYER_TUNNEL), lro);
5186                         }
5188                         if (!hrxq) {
5189                                 rte_flow_error_set
5190                                         (error, rte_errno,
5191                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
5192                                          "cannot get hash queue");
5193                                 goto error;
5194                         }
5195                         dv->hrxq = hrxq;
5196                         dv->actions[n++] = dv->hrxq->action;
5197                 }
5198                 dv->flow =
5199                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
5200                                                   (void *)&dv->value, n,
5201                                                   dv->actions);
5202                 if (!dv->flow) {
5203                         rte_flow_error_set(error, errno,
5204                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5205                                            NULL,
5206                                            "hardware refuses to create flow");
5207                         goto error;
5208                 }
5209         }
5210         return 0;
5211 error:
5212         err = rte_errno; /* Save rte_errno before cleanup. */
5213         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5214                 struct mlx5_flow_dv *dv = &dev_flow->dv;
5215                 if (dv->hrxq) {
5216                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
5217                                 mlx5_hrxq_drop_release(dev);
5218                         else
5219                                 mlx5_hrxq_release(dev, dv->hrxq);
5220                         dv->hrxq = NULL;
5221                 }
5222         }
5223         rte_errno = err; /* Restore rte_errno. */
5224         return -rte_errno;
5225 }
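
/*
 * Note on the LRO checks in flow_dv_apply() above:
 * MLX5_FLOW_LAYER_IPV4_LRO and MLX5_FLOW_LAYER_IPV6_LRO are multi-bit
 * masks (the full-mask comparison implies they combine the outer L3 and
 * TCP layer bits), so "(layers & mask) == mask" is required; a plain
 * "(layers & mask)" test would also match a flow carrying only one of
 * the layers. Minimal sketch under that assumption; the guard macro is
 * a placeholder and never defined.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_lro_type(uint64_t layers)
{
        /* Every bit of the mask must be present, not just any of them. */
        if ((layers & MLX5_FLOW_LAYER_IPV4_LRO) == MLX5_FLOW_LAYER_IPV4_LRO)
                return MLX5_FLOW_IPV4_LRO;
        if ((layers & MLX5_FLOW_LAYER_IPV6_LRO) == MLX5_FLOW_LAYER_IPV6_LRO)
                return MLX5_FLOW_IPV6_LRO;
        return 0;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */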
5226
5227 /**
5228  * Release the flow matcher.
5229  *
5230  * @param dev
5231  *   Pointer to Ethernet device.
5232  * @param flow
5233  *   Pointer to mlx5_flow.
5234  *
5235  * @return
5236  *   1 while a reference on it exists, 0 when freed.
5237  */
5238 static int
5239 flow_dv_matcher_release(struct rte_eth_dev *dev,
5240                         struct mlx5_flow *flow)
5241 {
5242         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
5243         struct mlx5_priv *priv = dev->data->dev_private;
5244         struct mlx5_ibv_shared *sh = priv->sh;
5245         struct mlx5_flow_tbl_resource *tbl;
5246
5247         assert(matcher->matcher_object);
5248         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
5249                 dev->data->port_id, (void *)matcher,
5250                 rte_atomic32_read(&matcher->refcnt));
5251         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
5252                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
5253                            (matcher->matcher_object));
5254                 LIST_REMOVE(matcher, next);
5255                 if (matcher->egress)
5256                         tbl = &sh->tx_tbl[matcher->group];
5257                 else
5258                         tbl = &sh->rx_tbl[matcher->group];
5259                 flow_dv_tbl_resource_release(tbl);
5260                 rte_free(matcher);
5261                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
5262                         dev->data->port_id, (void *)matcher);
5263                 return 0;
5264         }
5265         return 1;
5266 }
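
/*
 * flow_dv_matcher_release() above and the *_release() helpers below all
 * follow the same reference-counting pattern, sketched here for clarity.
 * Illustrative only: the structure, the guard macro and the commented
 * destroy call are placeholders, not driver code.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
struct flow_dv_example_res {
        LIST_ENTRY(flow_dv_example_res) next;
        rte_atomic32_t refcnt;
        void *object;
};

static int
flow_dv_example_release(struct flow_dv_example_res *res)
{
        /* Drop one reference; tear down only when the last one is gone. */
        if (rte_atomic32_dec_and_test(&res->refcnt)) {
                /* claim_zero(<destroy>(res->object)); */
                LIST_REMOVE(res, next);
                rte_free(res);
                return 0; /* Freed. */
        }
        return 1; /* Still referenced. */
}
#endif /* MLX5_FLOW_DV_EXAMPLES */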
5267
5268 /**
5269  * Release an encap/decap resource.
5270  *
5271  * @param flow
5272  *   Pointer to mlx5_flow.
5273  *
5274  * @return
5275  *   1 while a reference on it exists, 0 when freed.
5276  */
5277 static int
5278 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
5279 {
5280         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
5281                                                 flow->dv.encap_decap;
5282
5283         assert(cache_resource->verbs_action);
5284         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
5285                 (void *)cache_resource,
5286                 rte_atomic32_read(&cache_resource->refcnt));
5287         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5288                 claim_zero(mlx5_glue->destroy_flow_action
5289                                 (cache_resource->verbs_action));
5290                 LIST_REMOVE(cache_resource, next);
5291                 rte_free(cache_resource);
5292                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
5293                         (void *)cache_resource);
5294                 return 0;
5295         }
5296         return 1;
5297 }
5298
5299 /**
5300  * Release a jump-to-table action resource.
5301  *
5302  * @param flow
5303  *   Pointer to mlx5_flow.
5304  *
5305  * @return
5306  *   1 while a reference on it exists, 0 when freed.
5307  */
5308 static int
5309 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
5310 {
5311         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
5312                                                 flow->dv.jump;
5313
5314         assert(cache_resource->action);
5315         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
5316                 (void *)cache_resource,
5317                 rte_atomic32_read(&cache_resource->refcnt));
5318         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5319                 claim_zero(mlx5_glue->destroy_flow_action
5320                                 (cache_resource->action));
5321                 LIST_REMOVE(cache_resource, next);
5322                 flow_dv_tbl_resource_release(cache_resource->tbl);
5323                 rte_free(cache_resource);
5324                 DRV_LOG(DEBUG, "jump table resource %p: removed",
5325                         (void *)cache_resource);
5326                 return 0;
5327         }
5328         return 1;
5329 }
5330
5331 /**
5332  * Release a modify-header resource.
5333  *
5334  * @param flow
5335  *   Pointer to mlx5_flow.
5336  *
5337  * @return
5338  *   1 while a reference on it exists, 0 when freed.
5339  */
5340 static int
5341 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
5342 {
5343         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
5344                                                 flow->dv.modify_hdr;
5345
5346         assert(cache_resource->verbs_action);
5347         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
5348                 (void *)cache_resource,
5349                 rte_atomic32_read(&cache_resource->refcnt));
5350         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5351                 claim_zero(mlx5_glue->destroy_flow_action
5352                                 (cache_resource->verbs_action));
5353                 LIST_REMOVE(cache_resource, next);
5354                 rte_free(cache_resource);
5355                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
5356                         (void *)cache_resource);
5357                 return 0;
5358         }
5359         return 1;
5360 }
5361
5362 /**
5363  * Release port ID action resource.
5364  *
5365  * @param flow
5366  *   Pointer to mlx5_flow.
5367  *
5368  * @return
5369  *   1 while a reference on it exists, 0 when freed.
5370  */
5371 static int
5372 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
5373 {
5374         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
5375                 flow->dv.port_id_action;
5376
5377         assert(cache_resource->action);
5378         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
5379                 (void *)cache_resource,
5380                 rte_atomic32_read(&cache_resource->refcnt));
5381         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
5382                 claim_zero(mlx5_glue->destroy_flow_action
5383                                 (cache_resource->action));
5384                 LIST_REMOVE(cache_resource, next);
5385                 rte_free(cache_resource);
5386                 DRV_LOG(DEBUG, "port ID action resource %p: removed",
5387                         (void *)cache_resource);
5388                 return 0;
5389         }
5390         return 1;
5391 }
5392
5393 /**
5394  * Remove the flow from the NIC but keep it in memory.
5395  *
5396  * @param[in] dev
5397  *   Pointer to Ethernet device.
5398  * @param[in, out] flow
5399  *   Pointer to flow structure.
5400  */
5401 static void
5402 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5403 {
5404         struct mlx5_flow_dv *dv;
5405         struct mlx5_flow *dev_flow;
5406
5407         if (!flow)
5408                 return;
5409         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
5410                 dv = &dev_flow->dv;
5411                 if (dv->flow) {
5412                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
5413                         dv->flow = NULL;
5414                 }
5415                 if (dv->hrxq) {
5416                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
5417                                 mlx5_hrxq_drop_release(dev);
5418                         else
5419                                 mlx5_hrxq_release(dev, dv->hrxq);
5420                         dv->hrxq = NULL;
5421                 }
5422         }
5423 }
5424
5425 /**
5426  * Remove the flow from the NIC and the memory.
5427  *
5428  * @param[in] dev
5429  *   Pointer to the Ethernet device structure.
5430  * @param[in, out] flow
5431  *   Pointer to flow structure.
5432  */
5433 static void
5434 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5435 {
5436         struct mlx5_flow *dev_flow;
5437
5438         if (!flow)
5439                 return;
5440         flow_dv_remove(dev, flow);
5441         if (flow->counter) {
5442                 flow_dv_counter_release(dev, flow->counter);
5443                 flow->counter = NULL;
5444         }
5445         if (flow->tag_resource) {
5446                 flow_dv_tag_release(dev, flow->tag_resource);
5447                 flow->tag_resource = NULL;
5448         }
5449         while (!LIST_EMPTY(&flow->dev_flows)) {
5450                 dev_flow = LIST_FIRST(&flow->dev_flows);
5451                 LIST_REMOVE(dev_flow, next);
5452                 if (dev_flow->dv.matcher)
5453                         flow_dv_matcher_release(dev, dev_flow);
5454                 if (dev_flow->dv.encap_decap)
5455                         flow_dv_encap_decap_resource_release(dev_flow);
5456                 if (dev_flow->dv.modify_hdr)
5457                         flow_dv_modify_hdr_resource_release(dev_flow);
5458                 if (dev_flow->dv.jump)
5459                         flow_dv_jump_tbl_resource_release(dev_flow);
5460                 if (dev_flow->dv.port_id_action)
5461                         flow_dv_port_id_action_resource_release(dev_flow);
5462                 rte_free(dev_flow);
5463         }
5464 }
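
/*
 * Example (illustrative only): the create/destroy lifecycle as seen from
 * the public API. rte_flow_destroy() reaches flow_dv_destroy() above
 * through the driver ops table at the end of this file; attr, pattern
 * and actions are placeholders.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_lifecycle(uint16_t port_id,
                          const struct rte_flow_attr *attr,
                          const struct rte_flow_item pattern[],
                          const struct rte_flow_action actions[])
{
        struct rte_flow_error err;
        struct rte_flow *flow;

        flow = rte_flow_create(port_id, attr, pattern, actions, &err);
        if (flow)
                rte_flow_destroy(port_id, flow, &err);
}
#endif /* MLX5_FLOW_DV_EXAMPLES */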
5465
5466 /**
5467  * Query a DV flow rule for its statistics via DevX.
5468  *
5469  * @param[in] dev
5470  *   Pointer to Ethernet device.
5471  * @param[in] flow
5472  *   Pointer to the flow rule.
5473  * @param[out] data
5474  *   Data retrieved by the query.
5475  * @param[out] error
5476  *   Perform verbose error reporting if not NULL.
5477  *
5478  * @return
5479  *   0 on success, a negative errno value otherwise and rte_errno is set.
5480  */
5481 static int
5482 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
5483                     void *data, struct rte_flow_error *error)
5484 {
5485         struct mlx5_priv *priv = dev->data->dev_private;
5486         struct rte_flow_query_count *qc = data;
5487
5488         if (!priv->config.devx)
5489                 return rte_flow_error_set(error, ENOTSUP,
5490                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5491                                           NULL,
5492                                           "counters are not supported");
5493         if (flow->counter) {
5494                 uint64_t pkts, bytes;
5495                 int err = _flow_dv_query_count(dev, flow->counter, &pkts,
5496                                                &bytes);
5497
5498                 if (err)
5499                         return rte_flow_error_set(error, -err,
5500                                         RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5501                                         NULL, "cannot read counters");
5502                 qc->hits_set = 1;
5503                 qc->bytes_set = 1;
5504                 qc->hits = pkts - flow->counter->hits;
5505                 qc->bytes = bytes - flow->counter->bytes;
5506                 if (qc->reset) {
5507                         flow->counter->hits = pkts;
5508                         flow->counter->bytes = bytes;
5509                 }
5510                 return 0;
5511         }
5512         return rte_flow_error_set(error, EINVAL,
5513                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
5514                                   NULL,
5515                                   "counters are not available");
5516 }
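
/*
 * Example (illustrative only): how an application reaches
 * flow_dv_query_count() through the public API. rte_flow_query() with a
 * COUNT action is routed to flow_dv_query() below, which dispatches
 * here. The guard macro and helper name are placeholders.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static int
flow_dv_example_query_count(uint16_t port_id, struct rte_flow *flow,
                            uint64_t *hits, uint64_t *bytes)
{
        struct rte_flow_query_count qc = { .reset = 1 };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error err;
        int ret = rte_flow_query(port_id, flow, actions, &qc, &err);

        if (ret)
                return ret;
        if (qc.hits_set)
                *hits = qc.hits;
        if (qc.bytes_set)
                *bytes = qc.bytes;
        return 0;
}
#endif /* MLX5_FLOW_DV_EXAMPLES */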
5517
5518 /**
5519  * Query a flow.
5520  *
5521  * @see rte_flow_query()
5522  * @see rte_flow_ops
5523  */
5524 static int
5525 flow_dv_query(struct rte_eth_dev *dev,
5526               struct rte_flow *flow,
5527               const struct rte_flow_action *actions,
5528               void *data,
5529               struct rte_flow_error *error)
5530 {
5531         int ret = -EINVAL;
5532
5533         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
5534                 switch (actions->type) {
5535                 case RTE_FLOW_ACTION_TYPE_VOID:
5536                         break;
5537                 case RTE_FLOW_ACTION_TYPE_COUNT:
5538                         ret = flow_dv_query_count(dev, flow, data, error);
5539                         break;
5540                 default:
5541                         return rte_flow_error_set(error, ENOTSUP,
5542                                                   RTE_FLOW_ERROR_TYPE_ACTION,
5543                                                   actions,
5544                                                   "action not supported");
5545                 }
5546         }
5547         return ret;
5548 }
5549
5550 /*
5551  * Mutex-protected thunk to flow_dv_translate().
5552  */
5553 static int
5554 flow_d_translate(struct rte_eth_dev *dev,
5555                  struct mlx5_flow *dev_flow,
5556                  const struct rte_flow_attr *attr,
5557                  const struct rte_flow_item items[],
5558                  const struct rte_flow_action actions[],
5559                  struct rte_flow_error *error)
5560 {
5561         int ret;
5562
5563         flow_d_shared_lock(dev);
5564         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
5565         flow_d_shared_unlock(dev);
5566         return ret;
5567 }
5568
5569 /*
5570  * Mutex-protected thunk to flow_dv_apply().
5571  */
5572 static int
5573 flow_d_apply(struct rte_eth_dev *dev,
5574              struct rte_flow *flow,
5575              struct rte_flow_error *error)
5576 {
5577         int ret;
5578
5579         flow_d_shared_lock(dev);
5580         ret = flow_dv_apply(dev, flow, error);
5581         flow_d_shared_unlock(dev);
5582         return ret;
5583 }
5584
5585 /*
5586  * Mutex-protected thunk to flow_dv_remove().
5587  */
5588 static void
5589 flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
5590 {
5591         flow_d_shared_lock(dev);
5592         flow_dv_remove(dev, flow);
5593         flow_d_shared_unlock(dev);
5594 }
5595
5596 /*
5597  * Mutex-protected thunk to flow_dv_destroy().
5598  */
5599 static void
5600 flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
5601 {
5602         flow_d_shared_lock(dev);
5603         flow_dv_destroy(dev, flow);
5604         flow_d_shared_unlock(dev);
5605 }
5606
5607 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
5608         .validate = flow_dv_validate,
5609         .prepare = flow_dv_prepare,
5610         .translate = flow_d_translate,
5611         .apply = flow_d_apply,
5612         .remove = flow_d_remove,
5613         .destroy = flow_d_destroy,
5614         .query = flow_dv_query,
5615 };
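
/*
 * This table is not called directly: the generic layer in mlx5_flow.c
 * resolves each flow's driver type and dispatches through these
 * callbacks, roughly as follows (a sketch assuming the
 * flow_get_drv_ops() helper in mlx5_flow.c):
 *
 *      const struct mlx5_flow_driver_ops *fops =
 *              flow_get_drv_ops(flow->drv_type);
 *      fops->apply(dev, flow, error);
 */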
5616
5617 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */