net/mlx5: add drop action to Direct Verbs E-Switch
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
44 #endif
45
/*
 * Summary of the protocol layers detected in a flow pattern, used to pick
 * the matching modification field table (IPv4 vs IPv6, TCP vs UDP) when
 * translating modify-header actions.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the items have been scanned. */
		uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
		uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
		uint32_t tcp:1; /* Pattern contains a TCP item. */
		uint32_t udp:1; /* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* Raw access to all the bits at once. */
};
57
58 /**
59  * Initialize flow attributes structure according to flow items' types.
60  *
61  * @param[in] item
62  *   Pointer to item specification.
63  * @param[out] attr
64  *   Pointer to flow attributes structure.
65  */
66 static void
67 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
68 {
69         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
70                 switch (item->type) {
71                 case RTE_FLOW_ITEM_TYPE_IPV4:
72                         attr->ipv4 = 1;
73                         break;
74                 case RTE_FLOW_ITEM_TYPE_IPV6:
75                         attr->ipv6 = 1;
76                         break;
77                 case RTE_FLOW_ITEM_TYPE_UDP:
78                         attr->udp = 1;
79                         break;
80                 case RTE_FLOW_ITEM_TYPE_TCP:
81                         attr->tcp = 1;
82                         break;
83                 default:
84                         break;
85                 }
86         }
87         attr->valid = 1;
88 }
89
/* Describes one modifiable field within a protocol header. */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* HW field identifier. */
};
95
/* Modifiable Ethernet header fields: dst MAC at offset 0, src MAC at 6. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* Terminator (size 0). */
};
103
/* Modifiable IPv4 header fields: TTL at offset 8, src/dst addresses. */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* Terminator (size 0). */
};
110
/*
 * Modifiable IPv6 header fields: hop limit at offset 7, then the 128-bit
 * source (offset 8) and destination (offset 24) addresses in 32-bit chunks.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* Terminator (size 0). */
};
123
/* Modifiable UDP header fields: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* Terminator (size 0). */
};
129
/* Modifiable TCP header fields: source and destination ports. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0}, /* Terminator (size 0). */
};
135
136 /**
137  * Acquire the synchronizing object to protect multithreaded access
138  * to shared dv context. Lock occurs only if context is actually
139  * shared, i.e. we have multiport IB device and representors are
140  * created.
141  *
142  * @param[in] dev
143  *   Pointer to the rte_eth_dev structure.
144  */
145 static void
146 flow_d_shared_lock(struct rte_eth_dev *dev)
147 {
148         struct mlx5_priv *priv = dev->data->dev_private;
149         struct mlx5_ibv_shared *sh = priv->sh;
150
151         if (sh->dv_refcnt > 1) {
152                 int ret;
153
154                 ret = pthread_mutex_lock(&sh->dv_mutex);
155                 assert(!ret);
156                 (void)ret;
157         }
158 }
159
160 static void
161 flow_d_shared_unlock(struct rte_eth_dev *dev)
162 {
163         struct mlx5_priv *priv = dev->data->dev_private;
164         struct mlx5_ibv_shared *sh = priv->sh;
165
166         if (sh->dv_refcnt > 1) {
167                 int ret;
168
169                 ret = pthread_mutex_unlock(&sh->dv_mutex);
170                 assert(!ret);
171                 (void)ret;
172         }
173 }
174
175 /**
176  * Convert modify-header action to DV specification.
177  *
178  * @param[in] item
179  *   Pointer to item specification.
180  * @param[in] field
181  *   Pointer to field modification information.
182  * @param[in,out] resource
183  *   Pointer to the modify-header resource.
184  * @param[in] type
185  *   Type of modification.
186  * @param[out] error
187  *   Pointer to the error structure.
188  *
189  * @return
190  *   0 on success, a negative errno value otherwise and rte_errno is set.
191  */
192 static int
193 flow_dv_convert_modify_action(struct rte_flow_item *item,
194                               struct field_modify_info *field,
195                               struct mlx5_flow_dv_modify_hdr_resource *resource,
196                               uint32_t type,
197                               struct rte_flow_error *error)
198 {
199         uint32_t i = resource->actions_num;
200         struct mlx5_modification_cmd *actions = resource->actions;
201         const uint8_t *spec = item->spec;
202         const uint8_t *mask = item->mask;
203         uint32_t set;
204
205         while (field->size) {
206                 set = 0;
207                 /* Generate modify command for each mask segment. */
208                 memcpy(&set, &mask[field->offset], field->size);
209                 if (set) {
210                         if (i >= MLX5_MODIFY_NUM)
211                                 return rte_flow_error_set(error, EINVAL,
212                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
213                                          "too many items to modify");
214                         actions[i].action_type = type;
215                         actions[i].field = field->id;
216                         actions[i].length = field->size ==
217                                         4 ? 0 : field->size * 8;
218                         rte_memcpy(&actions[i].data[4 - field->size],
219                                    &spec[field->offset], field->size);
220                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
221                         ++i;
222                 }
223                 if (resource->actions_num != i)
224                         resource->actions_num = i;
225                 field++;
226         }
227         if (!resource->actions_num)
228                 return rte_flow_error_set(error, EINVAL,
229                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
230                                           "invalid modification flow item");
231         return 0;
232 }
233
234 /**
235  * Convert modify-header set IPv4 address action to DV specification.
236  *
237  * @param[in,out] resource
238  *   Pointer to the modify-header resource.
239  * @param[in] action
240  *   Pointer to action specification.
241  * @param[out] error
242  *   Pointer to the error structure.
243  *
244  * @return
245  *   0 on success, a negative errno value otherwise and rte_errno is set.
246  */
247 static int
248 flow_dv_convert_action_modify_ipv4
249                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
250                          const struct rte_flow_action *action,
251                          struct rte_flow_error *error)
252 {
253         const struct rte_flow_action_set_ipv4 *conf =
254                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
255         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
256         struct rte_flow_item_ipv4 ipv4;
257         struct rte_flow_item_ipv4 ipv4_mask;
258
259         memset(&ipv4, 0, sizeof(ipv4));
260         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
261         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
262                 ipv4.hdr.src_addr = conf->ipv4_addr;
263                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
264         } else {
265                 ipv4.hdr.dst_addr = conf->ipv4_addr;
266                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
267         }
268         item.spec = &ipv4;
269         item.mask = &ipv4_mask;
270         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
271                                              MLX5_MODIFICATION_TYPE_SET, error);
272 }
273
274 /**
275  * Convert modify-header set IPv6 address action to DV specification.
276  *
277  * @param[in,out] resource
278  *   Pointer to the modify-header resource.
279  * @param[in] action
280  *   Pointer to action specification.
281  * @param[out] error
282  *   Pointer to the error structure.
283  *
284  * @return
285  *   0 on success, a negative errno value otherwise and rte_errno is set.
286  */
287 static int
288 flow_dv_convert_action_modify_ipv6
289                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
290                          const struct rte_flow_action *action,
291                          struct rte_flow_error *error)
292 {
293         const struct rte_flow_action_set_ipv6 *conf =
294                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
295         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
296         struct rte_flow_item_ipv6 ipv6;
297         struct rte_flow_item_ipv6 ipv6_mask;
298
299         memset(&ipv6, 0, sizeof(ipv6));
300         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
301         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
302                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
303                        sizeof(ipv6.hdr.src_addr));
304                 memcpy(&ipv6_mask.hdr.src_addr,
305                        &rte_flow_item_ipv6_mask.hdr.src_addr,
306                        sizeof(ipv6.hdr.src_addr));
307         } else {
308                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
309                        sizeof(ipv6.hdr.dst_addr));
310                 memcpy(&ipv6_mask.hdr.dst_addr,
311                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
312                        sizeof(ipv6.hdr.dst_addr));
313         }
314         item.spec = &ipv6;
315         item.mask = &ipv6_mask;
316         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
317                                              MLX5_MODIFICATION_TYPE_SET, error);
318 }
319
320 /**
321  * Convert modify-header set MAC address action to DV specification.
322  *
323  * @param[in,out] resource
324  *   Pointer to the modify-header resource.
325  * @param[in] action
326  *   Pointer to action specification.
327  * @param[out] error
328  *   Pointer to the error structure.
329  *
330  * @return
331  *   0 on success, a negative errno value otherwise and rte_errno is set.
332  */
333 static int
334 flow_dv_convert_action_modify_mac
335                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
336                          const struct rte_flow_action *action,
337                          struct rte_flow_error *error)
338 {
339         const struct rte_flow_action_set_mac *conf =
340                 (const struct rte_flow_action_set_mac *)(action->conf);
341         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
342         struct rte_flow_item_eth eth;
343         struct rte_flow_item_eth eth_mask;
344
345         memset(&eth, 0, sizeof(eth));
346         memset(&eth_mask, 0, sizeof(eth_mask));
347         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
348                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
349                        sizeof(eth.src.addr_bytes));
350                 memcpy(&eth_mask.src.addr_bytes,
351                        &rte_flow_item_eth_mask.src.addr_bytes,
352                        sizeof(eth_mask.src.addr_bytes));
353         } else {
354                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
355                        sizeof(eth.dst.addr_bytes));
356                 memcpy(&eth_mask.dst.addr_bytes,
357                        &rte_flow_item_eth_mask.dst.addr_bytes,
358                        sizeof(eth_mask.dst.addr_bytes));
359         }
360         item.spec = &eth;
361         item.mask = &eth_mask;
362         return flow_dv_convert_modify_action(&item, modify_eth, resource,
363                                              MLX5_MODIFICATION_TYPE_SET, error);
364 }
365
366 /**
367  * Convert modify-header set TP action to DV specification.
368  *
369  * @param[in,out] resource
370  *   Pointer to the modify-header resource.
371  * @param[in] action
372  *   Pointer to action specification.
373  * @param[in] items
374  *   Pointer to rte_flow_item objects list.
375  * @param[in] attr
376  *   Pointer to flow attributes structure.
377  * @param[out] error
378  *   Pointer to the error structure.
379  *
380  * @return
381  *   0 on success, a negative errno value otherwise and rte_errno is set.
382  */
383 static int
384 flow_dv_convert_action_modify_tp
385                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
386                          const struct rte_flow_action *action,
387                          const struct rte_flow_item *items,
388                          union flow_dv_attr *attr,
389                          struct rte_flow_error *error)
390 {
391         const struct rte_flow_action_set_tp *conf =
392                 (const struct rte_flow_action_set_tp *)(action->conf);
393         struct rte_flow_item item;
394         struct rte_flow_item_udp udp;
395         struct rte_flow_item_udp udp_mask;
396         struct rte_flow_item_tcp tcp;
397         struct rte_flow_item_tcp tcp_mask;
398         struct field_modify_info *field;
399
400         if (!attr->valid)
401                 flow_dv_attr_init(items, attr);
402         if (attr->udp) {
403                 memset(&udp, 0, sizeof(udp));
404                 memset(&udp_mask, 0, sizeof(udp_mask));
405                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
406                         udp.hdr.src_port = conf->port;
407                         udp_mask.hdr.src_port =
408                                         rte_flow_item_udp_mask.hdr.src_port;
409                 } else {
410                         udp.hdr.dst_port = conf->port;
411                         udp_mask.hdr.dst_port =
412                                         rte_flow_item_udp_mask.hdr.dst_port;
413                 }
414                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
415                 item.spec = &udp;
416                 item.mask = &udp_mask;
417                 field = modify_udp;
418         }
419         if (attr->tcp) {
420                 memset(&tcp, 0, sizeof(tcp));
421                 memset(&tcp_mask, 0, sizeof(tcp_mask));
422                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
423                         tcp.hdr.src_port = conf->port;
424                         tcp_mask.hdr.src_port =
425                                         rte_flow_item_tcp_mask.hdr.src_port;
426                 } else {
427                         tcp.hdr.dst_port = conf->port;
428                         tcp_mask.hdr.dst_port =
429                                         rte_flow_item_tcp_mask.hdr.dst_port;
430                 }
431                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
432                 item.spec = &tcp;
433                 item.mask = &tcp_mask;
434                 field = modify_tcp;
435         }
436         return flow_dv_convert_modify_action(&item, field, resource,
437                                              MLX5_MODIFICATION_TYPE_SET, error);
438 }
439
440 /**
441  * Convert modify-header set TTL action to DV specification.
442  *
443  * @param[in,out] resource
444  *   Pointer to the modify-header resource.
445  * @param[in] action
446  *   Pointer to action specification.
447  * @param[in] items
448  *   Pointer to rte_flow_item objects list.
449  * @param[in] attr
450  *   Pointer to flow attributes structure.
451  * @param[out] error
452  *   Pointer to the error structure.
453  *
454  * @return
455  *   0 on success, a negative errno value otherwise and rte_errno is set.
456  */
457 static int
458 flow_dv_convert_action_modify_ttl
459                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
460                          const struct rte_flow_action *action,
461                          const struct rte_flow_item *items,
462                          union flow_dv_attr *attr,
463                          struct rte_flow_error *error)
464 {
465         const struct rte_flow_action_set_ttl *conf =
466                 (const struct rte_flow_action_set_ttl *)(action->conf);
467         struct rte_flow_item item;
468         struct rte_flow_item_ipv4 ipv4;
469         struct rte_flow_item_ipv4 ipv4_mask;
470         struct rte_flow_item_ipv6 ipv6;
471         struct rte_flow_item_ipv6 ipv6_mask;
472         struct field_modify_info *field;
473
474         if (!attr->valid)
475                 flow_dv_attr_init(items, attr);
476         if (attr->ipv4) {
477                 memset(&ipv4, 0, sizeof(ipv4));
478                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
479                 ipv4.hdr.time_to_live = conf->ttl_value;
480                 ipv4_mask.hdr.time_to_live = 0xFF;
481                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
482                 item.spec = &ipv4;
483                 item.mask = &ipv4_mask;
484                 field = modify_ipv4;
485         }
486         if (attr->ipv6) {
487                 memset(&ipv6, 0, sizeof(ipv6));
488                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
489                 ipv6.hdr.hop_limits = conf->ttl_value;
490                 ipv6_mask.hdr.hop_limits = 0xFF;
491                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
492                 item.spec = &ipv6;
493                 item.mask = &ipv6_mask;
494                 field = modify_ipv6;
495         }
496         return flow_dv_convert_modify_action(&item, field, resource,
497                                              MLX5_MODIFICATION_TYPE_SET, error);
498 }
499
500 /**
501  * Convert modify-header decrement TTL action to DV specification.
502  *
503  * @param[in,out] resource
504  *   Pointer to the modify-header resource.
505  * @param[in] action
506  *   Pointer to action specification.
507  * @param[in] items
508  *   Pointer to rte_flow_item objects list.
509  * @param[in] attr
510  *   Pointer to flow attributes structure.
511  * @param[out] error
512  *   Pointer to the error structure.
513  *
514  * @return
515  *   0 on success, a negative errno value otherwise and rte_errno is set.
516  */
517 static int
518 flow_dv_convert_action_modify_dec_ttl
519                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
520                          const struct rte_flow_item *items,
521                          union flow_dv_attr *attr,
522                          struct rte_flow_error *error)
523 {
524         struct rte_flow_item item;
525         struct rte_flow_item_ipv4 ipv4;
526         struct rte_flow_item_ipv4 ipv4_mask;
527         struct rte_flow_item_ipv6 ipv6;
528         struct rte_flow_item_ipv6 ipv6_mask;
529         struct field_modify_info *field;
530
531         if (!attr->valid)
532                 flow_dv_attr_init(items, attr);
533         if (attr->ipv4) {
534                 memset(&ipv4, 0, sizeof(ipv4));
535                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
536                 ipv4.hdr.time_to_live = 0xFF;
537                 ipv4_mask.hdr.time_to_live = 0xFF;
538                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
539                 item.spec = &ipv4;
540                 item.mask = &ipv4_mask;
541                 field = modify_ipv4;
542         }
543         if (attr->ipv6) {
544                 memset(&ipv6, 0, sizeof(ipv6));
545                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
546                 ipv6.hdr.hop_limits = 0xFF;
547                 ipv6_mask.hdr.hop_limits = 0xFF;
548                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
549                 item.spec = &ipv6;
550                 item.mask = &ipv6_mask;
551                 field = modify_ipv6;
552         }
553         return flow_dv_convert_modify_action(&item, field, resource,
554                                              MLX5_MODIFICATION_TYPE_ADD, error);
555 }
556
557 /**
558  * Validate META item.
559  *
560  * @param[in] dev
561  *   Pointer to the rte_eth_dev structure.
562  * @param[in] item
563  *   Item specification.
564  * @param[in] attr
565  *   Attributes of flow that includes this item.
566  * @param[out] error
567  *   Pointer to error structure.
568  *
569  * @return
570  *   0 on success, a negative errno value otherwise and rte_errno is set.
571  */
572 static int
573 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
574                            const struct rte_flow_item *item,
575                            const struct rte_flow_attr *attr,
576                            struct rte_flow_error *error)
577 {
578         const struct rte_flow_item_meta *spec = item->spec;
579         const struct rte_flow_item_meta *mask = item->mask;
580         const struct rte_flow_item_meta nic_mask = {
581                 .data = RTE_BE32(UINT32_MAX)
582         };
583         int ret;
584         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
585
586         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
587                 return rte_flow_error_set(error, EPERM,
588                                           RTE_FLOW_ERROR_TYPE_ITEM,
589                                           NULL,
590                                           "match on metadata offload "
591                                           "configuration is off for this port");
592         if (!spec)
593                 return rte_flow_error_set(error, EINVAL,
594                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
595                                           item->spec,
596                                           "data cannot be empty");
597         if (!spec->data)
598                 return rte_flow_error_set(error, EINVAL,
599                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
600                                           NULL,
601                                           "data cannot be zero");
602         if (!mask)
603                 mask = &rte_flow_item_meta_mask;
604         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
605                                         (const uint8_t *)&nic_mask,
606                                         sizeof(struct rte_flow_item_meta),
607                                         error);
608         if (ret < 0)
609                 return ret;
610         if (attr->ingress)
611                 return rte_flow_error_set(error, ENOTSUP,
612                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
613                                           NULL,
614                                           "pattern not supported for ingress");
615         return 0;
616 }
617
618 /**
619  * Validate vport item.
620  *
621  * @param[in] dev
622  *   Pointer to the rte_eth_dev structure.
623  * @param[in] item
624  *   Item specification.
625  * @param[in] attr
626  *   Attributes of flow that includes this item.
627  * @param[in] item_flags
628  *   Bit-fields that holds the items detected until now.
629  * @param[out] error
630  *   Pointer to error structure.
631  *
632  * @return
633  *   0 on success, a negative errno value otherwise and rte_errno is set.
634  */
635 static int
636 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
637                               const struct rte_flow_item *item,
638                               const struct rte_flow_attr *attr,
639                               uint64_t item_flags,
640                               struct rte_flow_error *error)
641 {
642         const struct rte_flow_item_port_id *spec = item->spec;
643         const struct rte_flow_item_port_id *mask = item->mask;
644         const struct rte_flow_item_port_id switch_mask = {
645                         .id = 0xffffffff,
646         };
647         uint16_t esw_domain_id;
648         uint16_t item_port_esw_domain_id;
649         int ret;
650
651         if (!attr->transfer)
652                 return rte_flow_error_set(error, EINVAL,
653                                           RTE_FLOW_ERROR_TYPE_ITEM,
654                                           NULL,
655                                           "match on port id is valid only"
656                                           " when transfer flag is enabled");
657         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
658                 return rte_flow_error_set(error, ENOTSUP,
659                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
660                                           "multiple source ports are not"
661                                           " supported");
662         if (!mask)
663                 mask = &switch_mask;
664         if (mask->id != 0xffffffff)
665                 return rte_flow_error_set(error, ENOTSUP,
666                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
667                                            mask,
668                                            "no support for partial mask on"
669                                            " \"id\" field");
670         ret = mlx5_flow_item_acceptable
671                                 (item, (const uint8_t *)mask,
672                                  (const uint8_t *)&rte_flow_item_port_id_mask,
673                                  sizeof(struct rte_flow_item_port_id),
674                                  error);
675         if (ret)
676                 return ret;
677         if (!spec)
678                 return 0;
679         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
680                                         NULL);
681         if (ret)
682                 return rte_flow_error_set(error, -ret,
683                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
684                                           "failed to obtain E-Switch info for"
685                                           " port");
686         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
687                                         &esw_domain_id, NULL);
688         if (ret < 0)
689                 return rte_flow_error_set(error, -ret,
690                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
691                                           NULL,
692                                           "failed to obtain E-Switch info");
693         if (item_port_esw_domain_id != esw_domain_id)
694                 return rte_flow_error_set(error, -ret,
695                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
696                                           "cannot match on a port from a"
697                                           " different E-Switch");
698         return 0;
699 }
700
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	/* Flow counters need DevX enabled on the device. */
	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
	/* Reached when DevX is off or DevX counters are not compiled in. */
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}
730
731 /**
732  * Validate the L2 encap action.
733  *
734  * @param[in] action_flags
735  *   Holds the actions detected until now.
736  * @param[in] action
737  *   Pointer to the encap action.
738  * @param[in] attr
739  *   Pointer to flow attributes
740  * @param[out] error
741  *   Pointer to error structure.
742  *
743  * @return
744  *   0 on success, a negative errno value otherwise and rte_errno is set.
745  */
746 static int
747 flow_dv_validate_action_l2_encap(uint64_t action_flags,
748                                  const struct rte_flow_action *action,
749                                  const struct rte_flow_attr *attr,
750                                  struct rte_flow_error *error)
751 {
752         if (!(action->conf))
753                 return rte_flow_error_set(error, EINVAL,
754                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
755                                           "configuration cannot be null");
756         if (action_flags & MLX5_FLOW_ACTION_DROP)
757                 return rte_flow_error_set(error, EINVAL,
758                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
759                                           "can't drop and encap in same flow");
760         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
761                 return rte_flow_error_set(error, EINVAL,
762                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
763                                           "can only have a single encap or"
764                                           " decap action in a flow");
765         if (!attr->transfer && attr->ingress)
766                 return rte_flow_error_set(error, ENOTSUP,
767                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
768                                           NULL,
769                                           "encap action not supported for "
770                                           "ingress");
771         return 0;
772 }
773
774 /**
775  * Validate the L2 decap action.
776  *
777  * @param[in] action_flags
778  *   Holds the actions detected until now.
779  * @param[in] attr
780  *   Pointer to flow attributes
781  * @param[out] error
782  *   Pointer to error structure.
783  *
784  * @return
785  *   0 on success, a negative errno value otherwise and rte_errno is set.
786  */
787 static int
788 flow_dv_validate_action_l2_decap(uint64_t action_flags,
789                                  const struct rte_flow_attr *attr,
790                                  struct rte_flow_error *error)
791 {
792         if (action_flags & MLX5_FLOW_ACTION_DROP)
793                 return rte_flow_error_set(error, EINVAL,
794                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
795                                           "can't drop and decap in same flow");
796         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
797                 return rte_flow_error_set(error, EINVAL,
798                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
799                                           "can only have a single encap or"
800                                           " decap action in a flow");
801         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
802                 return rte_flow_error_set(error, EINVAL,
803                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
804                                           "can't have decap action after"
805                                           " modify action");
806         if (attr->egress)
807                 return rte_flow_error_set(error, ENOTSUP,
808                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
809                                           NULL,
810                                           "decap action not supported for "
811                                           "egress");
812         return 0;
813 }
814
815 /**
816  * Validate the raw encap action.
817  *
818  * @param[in] action_flags
819  *   Holds the actions detected until now.
820  * @param[in] action
821  *   Pointer to the encap action.
822  * @param[in] attr
823  *   Pointer to flow attributes
824  * @param[out] error
825  *   Pointer to error structure.
826  *
827  * @return
828  *   0 on success, a negative errno value otherwise and rte_errno is set.
829  */
830 static int
831 flow_dv_validate_action_raw_encap(uint64_t action_flags,
832                                   const struct rte_flow_action *action,
833                                   const struct rte_flow_attr *attr,
834                                   struct rte_flow_error *error)
835 {
836         if (!(action->conf))
837                 return rte_flow_error_set(error, EINVAL,
838                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
839                                           "configuration cannot be null");
840         if (action_flags & MLX5_FLOW_ACTION_DROP)
841                 return rte_flow_error_set(error, EINVAL,
842                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
843                                           "can't drop and encap in same flow");
844         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
845                 return rte_flow_error_set(error, EINVAL,
846                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
847                                           "can only have a single encap"
848                                           " action in a flow");
849         /* encap without preceding decap is not supported for ingress */
850         if (!attr->transfer &&  attr->ingress &&
851             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
852                 return rte_flow_error_set(error, ENOTSUP,
853                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
854                                           NULL,
855                                           "encap action not supported for "
856                                           "ingress");
857         return 0;
858 }
859
860 /**
861  * Validate the raw decap action.
862  *
863  * @param[in] action_flags
864  *   Holds the actions detected until now.
865  * @param[in] action
 *   Pointer to the decap action.
867  * @param[in] attr
868  *   Pointer to flow attributes
869  * @param[out] error
870  *   Pointer to error structure.
871  *
872  * @return
873  *   0 on success, a negative errno value otherwise and rte_errno is set.
874  */
875 static int
876 flow_dv_validate_action_raw_decap(uint64_t action_flags,
877                                   const struct rte_flow_action *action,
878                                   const struct rte_flow_attr *attr,
879                                   struct rte_flow_error *error)
880 {
881         if (action_flags & MLX5_FLOW_ACTION_DROP)
882                 return rte_flow_error_set(error, EINVAL,
883                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
884                                           "can't drop and decap in same flow");
885         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
886                 return rte_flow_error_set(error, EINVAL,
887                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
888                                           "can't have encap action before"
889                                           " decap action");
890         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
891                 return rte_flow_error_set(error, EINVAL,
892                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
893                                           "can only have a single decap"
894                                           " action in a flow");
895         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
896                 return rte_flow_error_set(error, EINVAL,
897                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
898                                           "can't have decap action after"
899                                           " modify action");
900         /* decap action is valid on egress only if it is followed by encap */
901         if (attr->egress) {
902                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
903                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
904                        action++) {
905                 }
906                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
907                         return rte_flow_error_set
908                                         (error, ENOTSUP,
909                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
910                                          NULL, "decap action not supported"
911                                          " for egress");
912         }
913         return 0;
914 }
915
916 /**
917  * Find existing encap/decap resource or create and register a new one.
918  *
919  * @param dev[in, out]
920  *   Pointer to rte_eth_dev structure.
921  * @param[in, out] resource
922  *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
924  *   Pointer to the dev_flow.
925  * @param[out] error
926  *   pointer to error structure.
927  *
928  * @return
929  *   0 on success otherwise -errno and errno is set.
930  */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;

	/* flags is 1 only for flows on group 0, 0 for any other group. */
	resource->flags = flow->group ? 0 : 1;
	/* Select the DR namespace matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_ns;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		ns = sh->rx_ns;
	else
		ns = sh->tx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		/*
		 * Entries match only when reformat type, table type, flags
		 * and the raw reformat buffer contents are all identical.
		 */
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Reuse the cached action; just take a reference. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Pass a NULL buffer when there is no reformat data (size == 0). */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts with a single reference held by this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
997
998 /**
999  * Find existing table jump resource or create and register a new one.
1000  *
1001  * @param dev[in, out]
1002  *   Pointer to rte_eth_dev structure.
1003  * @param[in, out] resource
1004  *   Pointer to jump table resource.
 * @param[in, out] dev_flow
1006  *   Pointer to the dev_flow.
1007  * @param[out] error
1008  *   pointer to error structure.
1009  *
1010  * @return
1011  *   0 on success otherwise -errno and errno is set.
1012  */
1013 static int
1014 flow_dv_jump_tbl_resource_register
1015                         (struct rte_eth_dev *dev,
1016                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1017                          struct mlx5_flow *dev_flow,
1018                          struct rte_flow_error *error)
1019 {
1020         struct mlx5_priv *priv = dev->data->dev_private;
1021         struct mlx5_ibv_shared *sh = priv->sh;
1022         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1023
1024         /* Lookup a matching resource from cache. */
1025         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1026                 if (resource->tbl == cache_resource->tbl) {
1027                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1028                                 (void *)cache_resource,
1029                                 rte_atomic32_read(&cache_resource->refcnt));
1030                         rte_atomic32_inc(&cache_resource->refcnt);
1031                         dev_flow->dv.jump = cache_resource;
1032                         return 0;
1033                 }
1034         }
1035         /* Register new jump table resource. */
1036         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1037         if (!cache_resource)
1038                 return rte_flow_error_set(error, ENOMEM,
1039                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1040                                           "cannot allocate resource memory");
1041         *cache_resource = *resource;
1042         cache_resource->action =
1043                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1044                 (resource->tbl->obj);
1045         if (!cache_resource->action) {
1046                 rte_free(cache_resource);
1047                 return rte_flow_error_set(error, ENOMEM,
1048                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1049                                           NULL, "cannot create action");
1050         }
1051         rte_atomic32_init(&cache_resource->refcnt);
1052         rte_atomic32_inc(&cache_resource->refcnt);
1053         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1054         dev_flow->dv.jump = cache_resource;
1055         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1056                 (void *)cache_resource,
1057                 rte_atomic32_read(&cache_resource->refcnt));
1058         return 0;
1059 }
1060
1061 /**
1062  * Find existing table port ID resource or create and register a new one.
1063  *
1064  * @param dev[in, out]
1065  *   Pointer to rte_eth_dev structure.
1066  * @param[in, out] resource
1067  *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
1069  *   Pointer to the dev_flow.
1070  * @param[out] error
1071  *   pointer to error structure.
1072  *
1073  * @return
1074  *   0 on success otherwise -errno and errno is set.
1075  */
1076 static int
1077 flow_dv_port_id_action_resource_register
1078                         (struct rte_eth_dev *dev,
1079                          struct mlx5_flow_dv_port_id_action_resource *resource,
1080                          struct mlx5_flow *dev_flow,
1081                          struct rte_flow_error *error)
1082 {
1083         struct mlx5_priv *priv = dev->data->dev_private;
1084         struct mlx5_ibv_shared *sh = priv->sh;
1085         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1086
1087         /* Lookup a matching resource from cache. */
1088         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1089                 if (resource->port_id == cache_resource->port_id) {
1090                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1091                                 "refcnt %d++",
1092                                 (void *)cache_resource,
1093                                 rte_atomic32_read(&cache_resource->refcnt));
1094                         rte_atomic32_inc(&cache_resource->refcnt);
1095                         dev_flow->dv.port_id_action = cache_resource;
1096                         return 0;
1097                 }
1098         }
1099         /* Register new port id action resource. */
1100         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1101         if (!cache_resource)
1102                 return rte_flow_error_set(error, ENOMEM,
1103                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1104                                           "cannot allocate resource memory");
1105         *cache_resource = *resource;
1106         cache_resource->action =
1107                 mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
1108                                                             resource->port_id);
1109         if (!cache_resource->action) {
1110                 rte_free(cache_resource);
1111                 return rte_flow_error_set(error, ENOMEM,
1112                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1113                                           NULL, "cannot create action");
1114         }
1115         rte_atomic32_init(&cache_resource->refcnt);
1116         rte_atomic32_inc(&cache_resource->refcnt);
1117         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1118         dev_flow->dv.port_id_action = cache_resource;
1119         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1120                 (void *)cache_resource,
1121                 rte_atomic32_read(&cache_resource->refcnt));
1122         return 0;
1123 }
1124
1125 /**
1126  * Get the size of specific rte_flow_item_type
1127  *
1128  * @param[in] item_type
1129  *   Tested rte_flow_item_type.
1130  *
1131  * @return
1132  *   sizeof struct item_type, 0 if void or irrelevant.
1133  */
1134 static size_t
1135 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1136 {
1137         size_t retval;
1138
1139         switch (item_type) {
1140         case RTE_FLOW_ITEM_TYPE_ETH:
1141                 retval = sizeof(struct rte_flow_item_eth);
1142                 break;
1143         case RTE_FLOW_ITEM_TYPE_VLAN:
1144                 retval = sizeof(struct rte_flow_item_vlan);
1145                 break;
1146         case RTE_FLOW_ITEM_TYPE_IPV4:
1147                 retval = sizeof(struct rte_flow_item_ipv4);
1148                 break;
1149         case RTE_FLOW_ITEM_TYPE_IPV6:
1150                 retval = sizeof(struct rte_flow_item_ipv6);
1151                 break;
1152         case RTE_FLOW_ITEM_TYPE_UDP:
1153                 retval = sizeof(struct rte_flow_item_udp);
1154                 break;
1155         case RTE_FLOW_ITEM_TYPE_TCP:
1156                 retval = sizeof(struct rte_flow_item_tcp);
1157                 break;
1158         case RTE_FLOW_ITEM_TYPE_VXLAN:
1159                 retval = sizeof(struct rte_flow_item_vxlan);
1160                 break;
1161         case RTE_FLOW_ITEM_TYPE_GRE:
1162                 retval = sizeof(struct rte_flow_item_gre);
1163                 break;
1164         case RTE_FLOW_ITEM_TYPE_NVGRE:
1165                 retval = sizeof(struct rte_flow_item_nvgre);
1166                 break;
1167         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1168                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1169                 break;
1170         case RTE_FLOW_ITEM_TYPE_MPLS:
1171                 retval = sizeof(struct rte_flow_item_mpls);
1172                 break;
1173         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1174         default:
1175                 retval = 0;
1176                 break;
1177         }
1178         return retval;
1179 }
1180
1181 #define MLX5_ENCAP_IPV4_VERSION         0x40
1182 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1183 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1184 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1185 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1186 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1187 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1188
1189 /**
1190  * Convert the encap action data from list of rte_flow_item to raw buffer
1191  *
1192  * @param[in] items
1193  *   Pointer to rte_flow_item objects list.
1194  * @param[out] buf
1195  *   Pointer to the output buffer.
1196  * @param[out] size
1197  *   Pointer to the output buffer size.
1198  * @param[out] error
1199  *   Pointer to the error structure.
1200  *
1201  * @return
1202  *   0 on success, a negative errno value otherwise and rte_errno is set.
1203  */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf[], set as each header layer is appended. */
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	/*
	 * Append each item's spec to buf in order, then patch missing
	 * protocol/next-header fields so the layers chain correctly.
	 * NOTE(review): assumes items->spec is non-NULL whenever the item
	 * type has a non-zero length — presumably guaranteed by prior
	 * validation; confirm against callers.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		/* Bound check before the copy below. */
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			/* VLAN must be preceded by an Ethernet header. */
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Only fill ether_type if the user left it zero. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch the innermost L2 ethertype if unset. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			/* Default version/IHL and TTL when left zero. */
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			/* Default version field and hop limit when zero. */
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Patch the IP next-protocol field if unset. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* Default VXLAN UDP port and VNI-valid flag. */
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE requires an explicit next protocol. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			/* GRE requires an explicit inner protocol. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
1358
1359 /**
1360  * Convert L2 encap action to DV specification.
1361  *
1362  * @param[in] dev
1363  *   Pointer to rte_eth_dev structure.
1364  * @param[in] action
1365  *   Pointer to action structure.
1366  * @param[in, out] dev_flow
1367  *   Pointer to the mlx5_flow.
1368  * @param[in] transfer
1369  *   Mark if the flow is E-Switch flow.
1370  * @param[out] error
1371  *   Pointer to the error structure.
1372  *
1373  * @return
1374  *   0 on success, a negative errno value otherwise and rte_errno is set.
1375  */
1376 static int
1377 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1378                                const struct rte_flow_action *action,
1379                                struct mlx5_flow *dev_flow,
1380                                uint8_t transfer,
1381                                struct rte_flow_error *error)
1382 {
1383         const struct rte_flow_item *encap_data;
1384         const struct rte_flow_action_raw_encap *raw_encap_data;
1385         struct mlx5_flow_dv_encap_decap_resource res = {
1386                 .reformat_type =
1387                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1388                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1389                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1390         };
1391
1392         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1393                 raw_encap_data =
1394                         (const struct rte_flow_action_raw_encap *)action->conf;
1395                 res.size = raw_encap_data->size;
1396                 memcpy(res.buf, raw_encap_data->data, res.size);
1397         } else {
1398                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1399                         encap_data =
1400                                 ((const struct rte_flow_action_vxlan_encap *)
1401                                                 action->conf)->definition;
1402                 else
1403                         encap_data =
1404                                 ((const struct rte_flow_action_nvgre_encap *)
1405                                                 action->conf)->definition;
1406                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1407                                                &res.size, error))
1408                         return -rte_errno;
1409         }
1410         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1411                 return rte_flow_error_set(error, EINVAL,
1412                                           RTE_FLOW_ERROR_TYPE_ACTION,
1413                                           NULL, "can't create L2 encap action");
1414         return 0;
1415 }
1416
1417 /**
1418  * Convert L2 decap action to DV specification.
1419  *
1420  * @param[in] dev
1421  *   Pointer to rte_eth_dev structure.
1422  * @param[in, out] dev_flow
1423  *   Pointer to the mlx5_flow.
1424  * @param[in] transfer
1425  *   Mark if the flow is E-Switch flow.
1426  * @param[out] error
1427  *   Pointer to the error structure.
1428  *
1429  * @return
1430  *   0 on success, a negative errno value otherwise and rte_errno is set.
1431  */
1432 static int
1433 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1434                                struct mlx5_flow *dev_flow,
1435                                uint8_t transfer,
1436                                struct rte_flow_error *error)
1437 {
1438         struct mlx5_flow_dv_encap_decap_resource res = {
1439                 .size = 0,
1440                 .reformat_type =
1441                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1442                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1443                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1444         };
1445
1446         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1447                 return rte_flow_error_set(error, EINVAL,
1448                                           RTE_FLOW_ERROR_TYPE_ACTION,
1449                                           NULL, "can't create L2 decap action");
1450         return 0;
1451 }
1452
1453 /**
1454  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1455  *
1456  * @param[in] dev
1457  *   Pointer to rte_eth_dev structure.
1458  * @param[in] action
1459  *   Pointer to action structure.
1460  * @param[in, out] dev_flow
1461  *   Pointer to the mlx5_flow.
1462  * @param[in] attr
1463  *   Pointer to the flow attributes.
1464  * @param[out] error
1465  *   Pointer to the error structure.
1466  *
1467  * @return
1468  *   0 on success, a negative errno value otherwise and rte_errno is set.
1469  */
1470 static int
1471 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1472                                 const struct rte_flow_action *action,
1473                                 struct mlx5_flow *dev_flow,
1474                                 const struct rte_flow_attr *attr,
1475                                 struct rte_flow_error *error)
1476 {
1477         const struct rte_flow_action_raw_encap *encap_data;
1478         struct mlx5_flow_dv_encap_decap_resource res;
1479
1480         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1481         res.size = encap_data->size;
1482         memcpy(res.buf, encap_data->data, res.size);
1483         res.reformat_type = attr->egress ?
1484                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1485                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1486         if (attr->transfer)
1487                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1488         else
1489                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1490                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1491         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1492                 return rte_flow_error_set(error, EINVAL,
1493                                           RTE_FLOW_ERROR_TYPE_ACTION,
1494                                           NULL, "can't create encap action");
1495         return 0;
1496 }
1497
1498 /**
1499  * Validate the modify-header actions.
1500  *
1501  * @param[in] action_flags
1502  *   Holds the actions detected until now.
1503  * @param[in] action
1504  *   Pointer to the modify action.
1505  * @param[out] error
1506  *   Pointer to error structure.
1507  *
1508  * @return
1509  *   0 on success, a negative errno value otherwise and rte_errno is set.
1510  */
1511 static int
1512 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1513                                    const struct rte_flow_action *action,
1514                                    struct rte_flow_error *error)
1515 {
1516         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1517                 return rte_flow_error_set(error, EINVAL,
1518                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1519                                           NULL, "action configuration not set");
1520         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1521                 return rte_flow_error_set(error, EINVAL,
1522                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1523                                           "can't have encap action before"
1524                                           " modify action");
1525         return 0;
1526 }
1527
1528 /**
1529  * Validate the modify-header MAC address actions.
1530  *
1531  * @param[in] action_flags
1532  *   Holds the actions detected until now.
1533  * @param[in] action
1534  *   Pointer to the modify action.
1535  * @param[in] item_flags
1536  *   Holds the items detected.
1537  * @param[out] error
1538  *   Pointer to error structure.
1539  *
1540  * @return
1541  *   0 on success, a negative errno value otherwise and rte_errno is set.
1542  */
1543 static int
1544 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1545                                    const struct rte_flow_action *action,
1546                                    const uint64_t item_flags,
1547                                    struct rte_flow_error *error)
1548 {
1549         int ret = 0;
1550
1551         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1552         if (!ret) {
1553                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1554                         return rte_flow_error_set(error, EINVAL,
1555                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1556                                                   NULL,
1557                                                   "no L2 item in pattern");
1558         }
1559         return ret;
1560 }
1561
1562 /**
1563  * Validate the modify-header IPv4 address actions.
1564  *
1565  * @param[in] action_flags
1566  *   Holds the actions detected until now.
1567  * @param[in] action
1568  *   Pointer to the modify action.
1569  * @param[in] item_flags
1570  *   Holds the items detected.
1571  * @param[out] error
1572  *   Pointer to error structure.
1573  *
1574  * @return
1575  *   0 on success, a negative errno value otherwise and rte_errno is set.
1576  */
1577 static int
1578 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1579                                     const struct rte_flow_action *action,
1580                                     const uint64_t item_flags,
1581                                     struct rte_flow_error *error)
1582 {
1583         int ret = 0;
1584
1585         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1586         if (!ret) {
1587                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1588                         return rte_flow_error_set(error, EINVAL,
1589                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1590                                                   NULL,
1591                                                   "no ipv4 item in pattern");
1592         }
1593         return ret;
1594 }
1595
1596 /**
1597  * Validate the modify-header IPv6 address actions.
1598  *
1599  * @param[in] action_flags
1600  *   Holds the actions detected until now.
1601  * @param[in] action
1602  *   Pointer to the modify action.
1603  * @param[in] item_flags
1604  *   Holds the items detected.
1605  * @param[out] error
1606  *   Pointer to error structure.
1607  *
1608  * @return
1609  *   0 on success, a negative errno value otherwise and rte_errno is set.
1610  */
1611 static int
1612 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1613                                     const struct rte_flow_action *action,
1614                                     const uint64_t item_flags,
1615                                     struct rte_flow_error *error)
1616 {
1617         int ret = 0;
1618
1619         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1620         if (!ret) {
1621                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1622                         return rte_flow_error_set(error, EINVAL,
1623                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1624                                                   NULL,
1625                                                   "no ipv6 item in pattern");
1626         }
1627         return ret;
1628 }
1629
1630 /**
1631  * Validate the modify-header TP actions.
1632  *
1633  * @param[in] action_flags
1634  *   Holds the actions detected until now.
1635  * @param[in] action
1636  *   Pointer to the modify action.
1637  * @param[in] item_flags
1638  *   Holds the items detected.
1639  * @param[out] error
1640  *   Pointer to error structure.
1641  *
1642  * @return
1643  *   0 on success, a negative errno value otherwise and rte_errno is set.
1644  */
1645 static int
1646 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1647                                   const struct rte_flow_action *action,
1648                                   const uint64_t item_flags,
1649                                   struct rte_flow_error *error)
1650 {
1651         int ret = 0;
1652
1653         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1654         if (!ret) {
1655                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1656                         return rte_flow_error_set(error, EINVAL,
1657                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1658                                                   NULL, "no transport layer "
1659                                                   "in pattern");
1660         }
1661         return ret;
1662 }
1663
1664 /**
1665  * Validate the modify-header TTL actions.
1666  *
1667  * @param[in] action_flags
1668  *   Holds the actions detected until now.
1669  * @param[in] action
1670  *   Pointer to the modify action.
1671  * @param[in] item_flags
1672  *   Holds the items detected.
1673  * @param[out] error
1674  *   Pointer to error structure.
1675  *
1676  * @return
1677  *   0 on success, a negative errno value otherwise and rte_errno is set.
1678  */
1679 static int
1680 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1681                                    const struct rte_flow_action *action,
1682                                    const uint64_t item_flags,
1683                                    struct rte_flow_error *error)
1684 {
1685         int ret = 0;
1686
1687         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1688         if (!ret) {
1689                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1690                         return rte_flow_error_set(error, EINVAL,
1691                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1692                                                   NULL,
1693                                                   "no IP protocol in pattern");
1694         }
1695         return ret;
1696 }
1697
1698 /**
1699  * Validate jump action.
1700  *
1701  * @param[in] action
1702  *   Pointer to the modify action.
1703  * @param[in] group
1704  *   The group of the current flow.
1705  * @param[out] error
1706  *   Pointer to error structure.
1707  *
1708  * @return
1709  *   0 on success, a negative errno value otherwise and rte_errno is set.
1710  */
1711 static int
1712 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1713                              uint32_t group,
1714                              struct rte_flow_error *error)
1715 {
1716         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1717                 return rte_flow_error_set(error, EINVAL,
1718                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1719                                           NULL, "action configuration not set");
1720         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1721                 return rte_flow_error_set(error, EINVAL,
1722                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1723                                           "target group must be higher then"
1724                                           " the current flow group");
1725         return 0;
1726 }
1727
1728 /*
1729  * Validate the port_id action.
1730  *
1731  * @param[in] dev
1732  *   Pointer to rte_eth_dev structure.
1733  * @param[in] action_flags
1734  *   Bit-fields that holds the actions detected until now.
1735  * @param[in] action
1736  *   Port_id RTE action structure.
1737  * @param[in] attr
1738  *   Attributes of flow that includes this action.
1739  * @param[out] error
1740  *   Pointer to error structure.
1741  *
1742  * @return
1743  *   0 on success, a negative errno value otherwise and rte_errno is set.
1744  */
1745 static int
1746 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1747                                 uint64_t action_flags,
1748                                 const struct rte_flow_action *action,
1749                                 const struct rte_flow_attr *attr,
1750                                 struct rte_flow_error *error)
1751 {
1752         const struct rte_flow_action_port_id *port_id;
1753         uint16_t port;
1754         uint16_t esw_domain_id;
1755         uint16_t act_port_domain_id;
1756         int ret;
1757
1758         if (!attr->transfer)
1759                 return rte_flow_error_set(error, ENOTSUP,
1760                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1761                                           NULL,
1762                                           "port id action is valid in transfer"
1763                                           " mode only");
1764         if (!action || !action->conf)
1765                 return rte_flow_error_set(error, ENOTSUP,
1766                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1767                                           NULL,
1768                                           "port id action parameters must be"
1769                                           " specified");
1770         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1771                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1772                 return rte_flow_error_set(error, EINVAL,
1773                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1774                                           "can have only one fate actions in"
1775                                           " a flow");
1776         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1777                                         &esw_domain_id, NULL);
1778         if (ret < 0)
1779                 return rte_flow_error_set(error, -ret,
1780                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1781                                           NULL,
1782                                           "failed to obtain E-Switch info");
1783         port_id = action->conf;
1784         port = port_id->original ? dev->data->port_id : port_id->id;
1785         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1786         if (ret)
1787                 return rte_flow_error_set
1788                                 (error, -ret,
1789                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1790                                  "failed to obtain E-Switch port id for port");
1791         if (act_port_domain_id != esw_domain_id)
1792                 return rte_flow_error_set
1793                                 (error, -ret,
1794                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1795                                  "port does not belong to"
1796                                  " E-Switch being configured");
1797         return 0;
1798 }
1799
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * The shared context keeps a list of modify-header commands; an entry is
 * reused (reference counted) when table type, number of actions and the
 * action bytes all match, otherwise a new DR action object is created.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; receives the (shared) resource pointer.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_ns *ns;

	/* Select the DR namespace matching the resource's table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_ns;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_ns;
	else
		ns = sh->rx_ns;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Cache hit: share the existing DR action. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	/* Snapshot the caller's resource before creating the DR object. */
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, 0,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts with one reference held by this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1878
1879 /**
1880  * Get or create a flow counter.
1881  *
1882  * @param[in] dev
1883  *   Pointer to the Ethernet device structure.
1884  * @param[in] shared
1885  *   Indicate if this counter is shared with other flows.
1886  * @param[in] id
1887  *   Counter identifier.
1888  *
1889  * @return
1890  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
1891  */
1892 static struct mlx5_flow_counter *
1893 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1894 {
1895         struct mlx5_priv *priv = dev->data->dev_private;
1896         struct mlx5_flow_counter *cnt = NULL;
1897         struct mlx5_devx_counter_set *dcs = NULL;
1898         int ret;
1899
1900         if (!priv->config.devx) {
1901                 ret = -ENOTSUP;
1902                 goto error_exit;
1903         }
1904         if (shared) {
1905                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1906                         if (cnt->shared && cnt->id == id) {
1907                                 cnt->ref_cnt++;
1908                                 return cnt;
1909                         }
1910                 }
1911         }
1912         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1913         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1914         if (!dcs || !cnt) {
1915                 ret = -ENOMEM;
1916                 goto error_exit;
1917         }
1918         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1919         if (ret)
1920                 goto error_exit;
1921         struct mlx5_flow_counter tmpl = {
1922                 .shared = shared,
1923                 .ref_cnt = 1,
1924                 .id = id,
1925                 .dcs = dcs,
1926         };
1927         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1928         if (!tmpl.action) {
1929                 ret = errno;
1930                 goto error_exit;
1931         }
1932         *cnt = tmpl;
1933         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1934         return cnt;
1935 error_exit:
1936         rte_free(cnt);
1937         rte_free(dcs);
1938         rte_errno = -ret;
1939         return NULL;
1940 }
1941
1942 /**
1943  * Release a flow counter.
1944  *
1945  * @param[in] counter
1946  *   Pointer to the counter handler.
1947  */
1948 static void
1949 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1950 {
1951         int ret;
1952
1953         if (!counter)
1954                 return;
1955         if (--counter->ref_cnt == 0) {
1956                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1957                 if (ret)
1958                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1959                 LIST_REMOVE(counter, next);
1960                 rte_free(counter->dcs);
1961                 rte_free(counter);
1962         }
1963 }
1964
1965 /**
1966  * Verify the @p attributes will be correctly understood by the NIC and store
1967  * them in the @p flow if everything is correct.
1968  *
1969  * @param[in] dev
1970  *   Pointer to dev struct.
1971  * @param[in] attributes
1972  *   Pointer to flow attributes
1973  * @param[out] error
1974  *   Pointer to error structure.
1975  *
1976  * @return
1977  *   0 on success, a negative errno value otherwise and rte_errno is set.
1978  */
1979 static int
1980 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1981                             const struct rte_flow_attr *attributes,
1982                             struct rte_flow_error *error)
1983 {
1984         struct mlx5_priv *priv = dev->data->dev_private;
1985         uint32_t priority_max = priv->config.flow_prio - 1;
1986
1987 #ifndef HAVE_MLX5DV_DR
1988         if (attributes->group)
1989                 return rte_flow_error_set(error, ENOTSUP,
1990                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1991                                           NULL,
1992                                           "groups is not supported");
1993 #endif
1994         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1995             attributes->priority >= priority_max)
1996                 return rte_flow_error_set(error, ENOTSUP,
1997                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1998                                           NULL,
1999                                           "priority out of range");
2000         if (attributes->transfer) {
2001                 if (!priv->config.dv_esw_en)
2002                         return rte_flow_error_set
2003                                 (error, ENOTSUP,
2004                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2005                                  "E-Switch dr is not supported");
2006                 if (!(priv->representor || priv->master))
2007                         return rte_flow_error_set
2008                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2009                                  NULL, "E-Switch configurationd can only be"
2010                                  " done by a master or a representor device");
2011                 if (attributes->egress)
2012                         return rte_flow_error_set
2013                                 (error, ENOTSUP,
2014                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2015                                  "egress is not supported");
2016                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2017                         return rte_flow_error_set
2018                                 (error, EINVAL,
2019                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2020                                  NULL, "group must be smaller than "
2021                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2022         }
2023         if (!(attributes->egress ^ attributes->ingress))
2024                 return rte_flow_error_set(error, ENOTSUP,
2025                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2026                                           "must specify exactly one of "
2027                                           "ingress or egress");
2028         return 0;
2029 }
2030
2031 /**
2032  * Internal validation function. For validating both actions and items.
2033  *
2034  * @param[in] dev
2035  *   Pointer to the rte_eth_dev structure.
2036  * @param[in] attr
2037  *   Pointer to the flow attributes.
2038  * @param[in] items
2039  *   Pointer to the list of items.
2040  * @param[in] actions
2041  *   Pointer to the list of actions.
2042  * @param[out] error
2043  *   Pointer to the error structure.
2044  *
2045  * @return
2046  *   0 on success, a negative errno value otherwise and rte_errno is set.
2047  */
2048 static int
2049 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2050                  const struct rte_flow_item items[],
2051                  const struct rte_flow_action actions[],
2052                  struct rte_flow_error *error)
2053 {
2054         int ret;
2055         uint64_t action_flags = 0;
2056         uint64_t item_flags = 0;
2057         uint64_t last_item = 0;
2058         uint8_t next_protocol = 0xff;
2059         int actions_n = 0;
2060
2061         if (items == NULL)
2062                 return -1;
2063         ret = flow_dv_validate_attributes(dev, attr, error);
2064         if (ret < 0)
2065                 return ret;
2066         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2067                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2068                 switch (items->type) {
2069                 case RTE_FLOW_ITEM_TYPE_VOID:
2070                         break;
2071                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2072                         ret = flow_dv_validate_item_port_id
2073                                         (dev, items, attr, item_flags, error);
2074                         if (ret < 0)
2075                                 return ret;
2076                         last_item |= MLX5_FLOW_ITEM_PORT_ID;
2077                         break;
2078                 case RTE_FLOW_ITEM_TYPE_ETH:
2079                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2080                                                           error);
2081                         if (ret < 0)
2082                                 return ret;
2083                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2084                                              MLX5_FLOW_LAYER_OUTER_L2;
2085                         break;
2086                 case RTE_FLOW_ITEM_TYPE_VLAN:
2087                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2088                                                            error);
2089                         if (ret < 0)
2090                                 return ret;
2091                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2092                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2093                         break;
2094                 case RTE_FLOW_ITEM_TYPE_IPV4:
2095                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2096                                                            NULL, error);
2097                         if (ret < 0)
2098                                 return ret;
2099                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2100                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2101                         if (items->mask != NULL &&
2102                             ((const struct rte_flow_item_ipv4 *)
2103                              items->mask)->hdr.next_proto_id) {
2104                                 next_protocol =
2105                                         ((const struct rte_flow_item_ipv4 *)
2106                                          (items->spec))->hdr.next_proto_id;
2107                                 next_protocol &=
2108                                         ((const struct rte_flow_item_ipv4 *)
2109                                          (items->mask))->hdr.next_proto_id;
2110                         } else {
2111                                 /* Reset for inner layer. */
2112                                 next_protocol = 0xff;
2113                         }
2114                         break;
2115                 case RTE_FLOW_ITEM_TYPE_IPV6:
2116                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2117                                                            NULL, error);
2118                         if (ret < 0)
2119                                 return ret;
2120                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2121                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2122                         if (items->mask != NULL &&
2123                             ((const struct rte_flow_item_ipv6 *)
2124                              items->mask)->hdr.proto) {
2125                                 next_protocol =
2126                                         ((const struct rte_flow_item_ipv6 *)
2127                                          items->spec)->hdr.proto;
2128                                 next_protocol &=
2129                                         ((const struct rte_flow_item_ipv6 *)
2130                                          items->mask)->hdr.proto;
2131                         } else {
2132                                 /* Reset for inner layer. */
2133                                 next_protocol = 0xff;
2134                         }
2135                         break;
2136                 case RTE_FLOW_ITEM_TYPE_TCP:
2137                         ret = mlx5_flow_validate_item_tcp
2138                                                 (items, item_flags,
2139                                                  next_protocol,
2140                                                  &rte_flow_item_tcp_mask,
2141                                                  error);
2142                         if (ret < 0)
2143                                 return ret;
2144                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2145                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2146                         break;
2147                 case RTE_FLOW_ITEM_TYPE_UDP:
2148                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2149                                                           next_protocol,
2150                                                           error);
2151                         if (ret < 0)
2152                                 return ret;
2153                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2154                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2155                         break;
2156                 case RTE_FLOW_ITEM_TYPE_GRE:
2157                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2158                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2159                                                           next_protocol, error);
2160                         if (ret < 0)
2161                                 return ret;
2162                         last_item = MLX5_FLOW_LAYER_GRE;
2163                         break;
2164                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2165                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2166                                                             error);
2167                         if (ret < 0)
2168                                 return ret;
2169                         last_item = MLX5_FLOW_LAYER_VXLAN;
2170                         break;
2171                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2172                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2173                                                                 item_flags, dev,
2174                                                                 error);
2175                         if (ret < 0)
2176                                 return ret;
2177                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2178                         break;
2179                 case RTE_FLOW_ITEM_TYPE_MPLS:
2180                         ret = mlx5_flow_validate_item_mpls(dev, items,
2181                                                            item_flags,
2182                                                            last_item, error);
2183                         if (ret < 0)
2184                                 return ret;
2185                         last_item = MLX5_FLOW_LAYER_MPLS;
2186                         break;
2187                 case RTE_FLOW_ITEM_TYPE_META:
2188                         ret = flow_dv_validate_item_meta(dev, items, attr,
2189                                                          error);
2190                         if (ret < 0)
2191                                 return ret;
2192                         last_item = MLX5_FLOW_ITEM_METADATA;
2193                         break;
2194                 default:
2195                         return rte_flow_error_set(error, ENOTSUP,
2196                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2197                                                   NULL, "item not supported");
2198                 }
2199                 item_flags |= last_item;
2200         }
2201         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2202                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2203                         return rte_flow_error_set(error, ENOTSUP,
2204                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2205                                                   actions, "too many actions");
2206                 switch (actions->type) {
2207                 case RTE_FLOW_ACTION_TYPE_VOID:
2208                         break;
2209                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2210                         ret = flow_dv_validate_action_port_id(dev,
2211                                                               action_flags,
2212                                                               actions,
2213                                                               attr,
2214                                                               error);
2215                         if (ret)
2216                                 return ret;
2217                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2218                         ++actions_n;
2219                         break;
2220                 case RTE_FLOW_ACTION_TYPE_FLAG:
2221                         ret = mlx5_flow_validate_action_flag(action_flags,
2222                                                              attr, error);
2223                         if (ret < 0)
2224                                 return ret;
2225                         action_flags |= MLX5_FLOW_ACTION_FLAG;
2226                         ++actions_n;
2227                         break;
2228                 case RTE_FLOW_ACTION_TYPE_MARK:
2229                         ret = mlx5_flow_validate_action_mark(actions,
2230                                                              action_flags,
2231                                                              attr, error);
2232                         if (ret < 0)
2233                                 return ret;
2234                         action_flags |= MLX5_FLOW_ACTION_MARK;
2235                         ++actions_n;
2236                         break;
2237                 case RTE_FLOW_ACTION_TYPE_DROP:
2238                         ret = mlx5_flow_validate_action_drop(action_flags,
2239                                                              attr, error);
2240                         if (ret < 0)
2241                                 return ret;
2242                         action_flags |= MLX5_FLOW_ACTION_DROP;
2243                         ++actions_n;
2244                         break;
2245                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2246                         ret = mlx5_flow_validate_action_queue(actions,
2247                                                               action_flags, dev,
2248                                                               attr, error);
2249                         if (ret < 0)
2250                                 return ret;
2251                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
2252                         ++actions_n;
2253                         break;
2254                 case RTE_FLOW_ACTION_TYPE_RSS:
2255                         ret = mlx5_flow_validate_action_rss(actions,
2256                                                             action_flags, dev,
2257                                                             attr, item_flags,
2258                                                             error);
2259                         if (ret < 0)
2260                                 return ret;
2261                         action_flags |= MLX5_FLOW_ACTION_RSS;
2262                         ++actions_n;
2263                         break;
2264                 case RTE_FLOW_ACTION_TYPE_COUNT:
2265                         ret = flow_dv_validate_action_count(dev, error);
2266                         if (ret < 0)
2267                                 return ret;
2268                         action_flags |= MLX5_FLOW_ACTION_COUNT;
2269                         ++actions_n;
2270                         break;
2271                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2272                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2273                         ret = flow_dv_validate_action_l2_encap(action_flags,
2274                                                                actions, attr,
2275                                                                error);
2276                         if (ret < 0)
2277                                 return ret;
2278                         action_flags |= actions->type ==
2279                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2280                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2281                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2282                         ++actions_n;
2283                         break;
2284                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2285                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2286                         ret = flow_dv_validate_action_l2_decap(action_flags,
2287                                                                attr, error);
2288                         if (ret < 0)
2289                                 return ret;
2290                         action_flags |= actions->type ==
2291                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2292                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2293                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2294                         ++actions_n;
2295                         break;
2296                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2297                         ret = flow_dv_validate_action_raw_encap(action_flags,
2298                                                                 actions, attr,
2299                                                                 error);
2300                         if (ret < 0)
2301                                 return ret;
2302                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2303                         ++actions_n;
2304                         break;
2305                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2306                         ret = flow_dv_validate_action_raw_decap(action_flags,
2307                                                                 actions, attr,
2308                                                                 error);
2309                         if (ret < 0)
2310                                 return ret;
2311                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2312                         ++actions_n;
2313                         break;
2314                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2315                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2316                         ret = flow_dv_validate_action_modify_mac(action_flags,
2317                                                                  actions,
2318                                                                  item_flags,
2319                                                                  error);
2320                         if (ret < 0)
2321                                 return ret;
2322                         /* Count all modify-header actions as one action. */
2323                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2324                                 ++actions_n;
2325                         action_flags |= actions->type ==
2326                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2327                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2328                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2329                         break;
2330
2331                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2332                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2333                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2334                                                                   actions,
2335                                                                   item_flags,
2336                                                                   error);
2337                         if (ret < 0)
2338                                 return ret;
2339                         /* Count all modify-header actions as one action. */
2340                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2341                                 ++actions_n;
2342                         action_flags |= actions->type ==
2343                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2344                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2345                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2346                         break;
2347                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2348                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2349                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2350                                                                   actions,
2351                                                                   item_flags,
2352                                                                   error);
2353                         if (ret < 0)
2354                                 return ret;
2355                         /* Count all modify-header actions as one action. */
2356                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2357                                 ++actions_n;
2358                         action_flags |= actions->type ==
2359                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2360                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2361                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2362                         break;
2363                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2364                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2365                         ret = flow_dv_validate_action_modify_tp(action_flags,
2366                                                                 actions,
2367                                                                 item_flags,
2368                                                                 error);
2369                         if (ret < 0)
2370                                 return ret;
2371                         /* Count all modify-header actions as one action. */
2372                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2373                                 ++actions_n;
2374                         action_flags |= actions->type ==
2375                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2376                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2377                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2378                         break;
2379                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2380                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2381                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2382                                                                  actions,
2383                                                                  item_flags,
2384                                                                  error);
2385                         if (ret < 0)
2386                                 return ret;
2387                         /* Count all modify-header actions as one action. */
2388                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2389                                 ++actions_n;
2390                         action_flags |= actions->type ==
2391                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2392                                                 MLX5_FLOW_ACTION_SET_TTL :
2393                                                 MLX5_FLOW_ACTION_DEC_TTL;
2394                         break;
2395                 case RTE_FLOW_ACTION_TYPE_JUMP:
2396                         ret = flow_dv_validate_action_jump(actions,
2397                                                            attr->group, error);
2398                         if (ret)
2399                                 return ret;
2400                         ++actions_n;
2401                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2402                         break;
2403                 default:
2404                         return rte_flow_error_set(error, ENOTSUP,
2405                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2406                                                   actions,
2407                                                   "action not supported");
2408                 }
2409         }
2410         /* Eswitch has few restrictions on using items and actions */
2411         if (attr->transfer) {
2412                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2413                         return rte_flow_error_set(error, ENOTSUP,
2414                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2415                                                   NULL,
2416                                                   "unsupported action FLAG");
2417                 if (action_flags & MLX5_FLOW_ACTION_MARK)
2418                         return rte_flow_error_set(error, ENOTSUP,
2419                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2420                                                   NULL,
2421                                                   "unsupported action MARK");
2422                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2423                         return rte_flow_error_set(error, ENOTSUP,
2424                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2425                                                   NULL,
2426                                                   "unsupported action QUEUE");
2427                 if (action_flags & MLX5_FLOW_ACTION_RSS)
2428                         return rte_flow_error_set(error, ENOTSUP,
2429                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2430                                                   NULL,
2431                                                   "unsupported action RSS");
2432                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2433                         return rte_flow_error_set(error, EINVAL,
2434                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2435                                                   actions,
2436                                                   "no fate action is found");
2437         } else {
2438                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2439                         return rte_flow_error_set(error, EINVAL,
2440                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2441                                                   actions,
2442                                                   "no fate action is found");
2443         }
2444         return 0;
2445 }
2446
2447 /**
2448  * Internal preparation function. Allocates the DV flow size,
2449  * this size is constant.
2450  *
2451  * @param[in] attr
2452  *   Pointer to the flow attributes.
2453  * @param[in] items
2454  *   Pointer to the list of items.
2455  * @param[in] actions
2456  *   Pointer to the list of actions.
2457  * @param[out] error
2458  *   Pointer to the error structure.
2459  *
2460  * @return
2461  *   Pointer to mlx5_flow object on success,
2462  *   otherwise NULL and rte_errno is set.
2463  */
2464 static struct mlx5_flow *
2465 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2466                 const struct rte_flow_item items[] __rte_unused,
2467                 const struct rte_flow_action actions[] __rte_unused,
2468                 struct rte_flow_error *error)
2469 {
2470         uint32_t size = sizeof(struct mlx5_flow);
2471         struct mlx5_flow *flow;
2472
2473         flow = rte_calloc(__func__, 1, size, 0);
2474         if (!flow) {
2475                 rte_flow_error_set(error, ENOMEM,
2476                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2477                                    "not enough memory to create flow");
2478                 return NULL;
2479         }
2480         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2481         return flow;
2482 }
2483
2484 #ifndef NDEBUG
2485 /**
2486  * Sanity check for match mask and value. Similar to check_valid_spec() in
2487  * kernel driver. If unmasked bit is present in value, it returns failure.
2488  *
2489  * @param match_mask
2490  *   pointer to match mask buffer.
2491  * @param match_value
2492  *   pointer to match value buffer.
2493  *
2494  * @return
2495  *   0 if valid, -EINVAL otherwise.
2496  */
2497 static int
2498 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2499 {
2500         uint8_t *m = match_mask;
2501         uint8_t *v = match_value;
2502         unsigned int i;
2503
2504         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2505                 if (v[i] & ~m[i]) {
2506                         DRV_LOG(ERR,
2507                                 "match_value differs from match_criteria"
2508                                 " %p[%u] != %p[%u]",
2509                                 match_value, i, match_mask, i);
2510                         return -EINVAL;
2511                 }
2512         }
2513         return 0;
2514 }
2515 #endif
2516
2517 /**
2518  * Add Ethernet item to matcher and to the value.
2519  *
2520  * @param[in, out] matcher
2521  *   Flow matcher.
2522  * @param[in, out] key
2523  *   Flow matcher value.
2524  * @param[in] item
2525  *   Flow pattern to translate.
2526  * @param[in] inner
2527  *   Item is inner pattern.
2528  */
2529 static void
2530 flow_dv_translate_item_eth(void *matcher, void *key,
2531                            const struct rte_flow_item *item, int inner)
2532 {
2533         const struct rte_flow_item_eth *eth_m = item->mask;
2534         const struct rte_flow_item_eth *eth_v = item->spec;
2535         const struct rte_flow_item_eth nic_mask = {
2536                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2537                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2538                 .type = RTE_BE16(0xffff),
2539         };
2540         void *headers_m;
2541         void *headers_v;
2542         char *l24_v;
2543         unsigned int i;
2544
2545         if (!eth_v)
2546                 return;
2547         if (!eth_m)
2548                 eth_m = &nic_mask;
2549         if (inner) {
2550                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2551                                          inner_headers);
2552                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2553         } else {
2554                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2555                                          outer_headers);
2556                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2557         }
2558         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2559                &eth_m->dst, sizeof(eth_m->dst));
2560         /* The value must be in the range of the mask. */
2561         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2562         for (i = 0; i < sizeof(eth_m->dst); ++i)
2563                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2564         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2565                &eth_m->src, sizeof(eth_m->src));
2566         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2567         /* The value must be in the range of the mask. */
2568         for (i = 0; i < sizeof(eth_m->dst); ++i)
2569                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2570         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2571                  rte_be_to_cpu_16(eth_m->type));
2572         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2573         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2574 }
2575
2576 /**
2577  * Add VLAN item to matcher and to the value.
2578  *
2579  * @param[in, out] matcher
2580  *   Flow matcher.
2581  * @param[in, out] key
2582  *   Flow matcher value.
2583  * @param[in] item
2584  *   Flow pattern to translate.
2585  * @param[in] inner
2586  *   Item is inner pattern.
2587  */
2588 static void
2589 flow_dv_translate_item_vlan(void *matcher, void *key,
2590                             const struct rte_flow_item *item,
2591                             int inner)
2592 {
2593         const struct rte_flow_item_vlan *vlan_m = item->mask;
2594         const struct rte_flow_item_vlan *vlan_v = item->spec;
2595         const struct rte_flow_item_vlan nic_mask = {
2596                 .tci = RTE_BE16(0x0fff),
2597                 .inner_type = RTE_BE16(0xffff),
2598         };
2599         void *headers_m;
2600         void *headers_v;
2601         uint16_t tci_m;
2602         uint16_t tci_v;
2603
2604         if (!vlan_v)
2605                 return;
2606         if (!vlan_m)
2607                 vlan_m = &nic_mask;
2608         if (inner) {
2609                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2610                                          inner_headers);
2611                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2612         } else {
2613                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2614                                          outer_headers);
2615                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2616         }
2617         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2618         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2619         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2620         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2621         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2622         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2623         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2624         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2625         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2626         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2627 }
2628
/**
 * Add IPV4 item to matcher and to the value.
 *
 * Programs the IP version, source/destination addresses, TOS (ECN/DSCP)
 * and next-protocol fields into the matcher mask and the flow value.
 * Values are always ANDed with the mask so they stay in range.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask: full addresses, TOS and next protocol. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * NOTE(review): group 0 uses a full 4-bit ip_version mask while
	 * other groups mask only 0x4 - presumably a root-table (HW/FW)
	 * requirement; confirm against the mlx5 PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* IP version is matched even without a spec; the rest needs one. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	/* Destination address: mask as-is, value ANDed with the mask. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	/* Source address: same masking scheme. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte is split: ECN = low 2 bits, DSCP = high 6 bits. */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	/* Next protocol (e.g. TCP/UDP/GRE), masked value. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
2706
2707 /**
2708  * Add IPV6 item to matcher and to the value.
2709  *
2710  * @param[in, out] matcher
2711  *   Flow matcher.
2712  * @param[in, out] key
2713  *   Flow matcher value.
2714  * @param[in] item
2715  *   Flow pattern to translate.
2716  * @param[in] inner
2717  *   Item is inner pattern.
2718  * @param[in] group
2719  *   The group to insert the rule.
2720  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Widest supported mask, used when the item carries no mask. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	/* Select inner or outer header set of the match parameters. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Root table (group 0) masks the full 4-bit ip_version field,
	 * other tables mask only 0x6 — presumably a device constraint
	 * on root-table matching; TODO confirm against PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* Without a spec only the IP version is matched. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: copy the mask, AND spec into the value. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address: same treatment as destination. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* ECN at bits 20-21 and DSCP at bits 22-27 of vtc_flow. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
2810
2811 /**
2812  * Add TCP item to matcher and to the value.
2813  *
2814  * @param[in, out] matcher
2815  *   Flow matcher.
2816  * @param[in, out] key
2817  *   Flow matcher value.
2818  * @param[in] item
2819  *   Flow pattern to translate.
2820  * @param[in] inner
2821  *   Item is inner pattern.
2822  */
2823 static void
2824 flow_dv_translate_item_tcp(void *matcher, void *key,
2825                            const struct rte_flow_item *item,
2826                            int inner)
2827 {
2828         const struct rte_flow_item_tcp *tcp_m = item->mask;
2829         const struct rte_flow_item_tcp *tcp_v = item->spec;
2830         void *headers_m;
2831         void *headers_v;
2832
2833         if (inner) {
2834                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2835                                          inner_headers);
2836                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2837         } else {
2838                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2839                                          outer_headers);
2840                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2841         }
2842         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2843         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2844         if (!tcp_v)
2845                 return;
2846         if (!tcp_m)
2847                 tcp_m = &rte_flow_item_tcp_mask;
2848         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2849                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2850         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2851                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2852         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2853                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2854         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2855                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2856 }
2857
2858 /**
2859  * Add UDP item to matcher and to the value.
2860  *
2861  * @param[in, out] matcher
2862  *   Flow matcher.
2863  * @param[in, out] key
2864  *   Flow matcher value.
2865  * @param[in] item
2866  *   Flow pattern to translate.
2867  * @param[in] inner
2868  *   Item is inner pattern.
2869  */
2870 static void
2871 flow_dv_translate_item_udp(void *matcher, void *key,
2872                            const struct rte_flow_item *item,
2873                            int inner)
2874 {
2875         const struct rte_flow_item_udp *udp_m = item->mask;
2876         const struct rte_flow_item_udp *udp_v = item->spec;
2877         void *headers_m;
2878         void *headers_v;
2879
2880         if (inner) {
2881                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2882                                          inner_headers);
2883                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2884         } else {
2885                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2886                                          outer_headers);
2887                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2888         }
2889         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2890         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2891         if (!udp_v)
2892                 return;
2893         if (!udp_m)
2894                 udp_m = &rte_flow_item_udp_mask;
2895         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2896                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2897         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2898                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2899         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2900                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2901         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2902                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2903 }
2904
2905 /**
2906  * Add GRE item to matcher and to the value.
2907  *
2908  * @param[in, out] matcher
2909  *   Flow matcher.
2910  * @param[in, out] key
2911  *   Flow matcher value.
2912  * @param[in] item
2913  *   Flow pattern to translate.
2914  * @param[in] inner
2915  *   Item is inner pattern.
2916  */
2917 static void
2918 flow_dv_translate_item_gre(void *matcher, void *key,
2919                            const struct rte_flow_item *item,
2920                            int inner)
2921 {
2922         const struct rte_flow_item_gre *gre_m = item->mask;
2923         const struct rte_flow_item_gre *gre_v = item->spec;
2924         void *headers_m;
2925         void *headers_v;
2926         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2927         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2928
2929         if (inner) {
2930                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2931                                          inner_headers);
2932                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2933         } else {
2934                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2935                                          outer_headers);
2936                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2937         }
2938         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2939         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2940         if (!gre_v)
2941                 return;
2942         if (!gre_m)
2943                 gre_m = &rte_flow_item_gre_mask;
2944         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2945                  rte_be_to_cpu_16(gre_m->protocol));
2946         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2947                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2948 }
2949
2950 /**
2951  * Add NVGRE item to matcher and to the value.
2952  *
2953  * @param[in, out] matcher
2954  *   Flow matcher.
2955  * @param[in, out] key
2956  *   Flow matcher value.
2957  * @param[in] item
2958  *   Flow pattern to translate.
2959  * @param[in] inner
2960  *   Item is inner pattern.
2961  */
2962 static void
2963 flow_dv_translate_item_nvgre(void *matcher, void *key,
2964                              const struct rte_flow_item *item,
2965                              int inner)
2966 {
2967         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2968         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2969         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2970         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2971         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2972         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2973         char *gre_key_m;
2974         char *gre_key_v;
2975         int size;
2976         int i;
2977
2978         flow_dv_translate_item_gre(matcher, key, item, inner);
2979         if (!nvgre_v)
2980                 return;
2981         if (!nvgre_m)
2982                 nvgre_m = &rte_flow_item_nvgre_mask;
2983         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2984         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2985         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2986         memcpy(gre_key_m, tni_flow_id_m, size);
2987         for (i = 0; i < size; ++i)
2988                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2989 }
2990
2991 /**
2992  * Add VXLAN item to matcher and to the value.
2993  *
2994  * @param[in, out] matcher
2995  *   Flow matcher.
2996  * @param[in, out] key
2997  *   Flow matcher value.
2998  * @param[in] item
2999  *   Flow pattern to translate.
3000  * @param[in] inner
3001  *   Item is inner pattern.
3002  */
3003 static void
3004 flow_dv_translate_item_vxlan(void *matcher, void *key,
3005                              const struct rte_flow_item *item,
3006                              int inner)
3007 {
3008         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3009         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3010         void *headers_m;
3011         void *headers_v;
3012         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3013         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3014         char *vni_m;
3015         char *vni_v;
3016         uint16_t dport;
3017         int size;
3018         int i;
3019
3020         if (inner) {
3021                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3022                                          inner_headers);
3023                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3024         } else {
3025                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3026                                          outer_headers);
3027                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3028         }
3029         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3030                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3031         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3032                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3033                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3034         }
3035         if (!vxlan_v)
3036                 return;
3037         if (!vxlan_m)
3038                 vxlan_m = &rte_flow_item_vxlan_mask;
3039         size = sizeof(vxlan_m->vni);
3040         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3041         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3042         memcpy(vni_m, vxlan_m->vni, size);
3043         for (i = 0; i < size; ++i)
3044                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3045 }
3046
3047 /**
3048  * Add MPLS item to matcher and to the value.
3049  *
3050  * @param[in, out] matcher
3051  *   Flow matcher.
3052  * @param[in, out] key
3053  *   Flow matcher value.
3054  * @param[in] item
3055  *   Flow pattern to translate.
3056  * @param[in] prev_layer
3057  *   The protocol layer indicated in previous item.
3058  * @param[in] inner
3059  *   Item is inner pattern.
3060  */
3061 static void
3062 flow_dv_translate_item_mpls(void *matcher, void *key,
3063                             const struct rte_flow_item *item,
3064                             uint64_t prev_layer,
3065                             int inner)
3066 {
3067         const uint32_t *in_mpls_m = item->mask;
3068         const uint32_t *in_mpls_v = item->spec;
3069         uint32_t *out_mpls_m = 0;
3070         uint32_t *out_mpls_v = 0;
3071         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3072         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3073         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3074                                      misc_parameters_2);
3075         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3076         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3077         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3078
3079         switch (prev_layer) {
3080         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3081                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3082                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3083                          MLX5_UDP_PORT_MPLS);
3084                 break;
3085         case MLX5_FLOW_LAYER_GRE:
3086                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3087                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3088                          ETHER_TYPE_MPLS);
3089                 break;
3090         default:
3091                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3092                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3093                          IPPROTO_MPLS);
3094                 break;
3095         }
3096         if (!in_mpls_v)
3097                 return;
3098         if (!in_mpls_m)
3099                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3100         switch (prev_layer) {
3101         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3102                 out_mpls_m =
3103                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3104                                                  outer_first_mpls_over_udp);
3105                 out_mpls_v =
3106                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3107                                                  outer_first_mpls_over_udp);
3108                 break;
3109         case MLX5_FLOW_LAYER_GRE:
3110                 out_mpls_m =
3111                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3112                                                  outer_first_mpls_over_gre);
3113                 out_mpls_v =
3114                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3115                                                  outer_first_mpls_over_gre);
3116                 break;
3117         default:
3118                 /* Inner MPLS not over GRE is not supported. */
3119                 if (!inner) {
3120                         out_mpls_m =
3121                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3122                                                          misc2_m,
3123                                                          outer_first_mpls);
3124                         out_mpls_v =
3125                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3126                                                          misc2_v,
3127                                                          outer_first_mpls);
3128                 }
3129                 break;
3130         }
3131         if (out_mpls_m && out_mpls_v) {
3132                 *out_mpls_m = *in_mpls_m;
3133                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3134         }
3135 }
3136
3137 /**
3138  * Add META item to matcher
3139  *
3140  * @param[in, out] matcher
3141  *   Flow matcher.
3142  * @param[in, out] key
3143  *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
3148  */
3149 static void
3150 flow_dv_translate_item_meta(void *matcher, void *key,
3151                             const struct rte_flow_item *item)
3152 {
3153         const struct rte_flow_item_meta *meta_m;
3154         const struct rte_flow_item_meta *meta_v;
3155         void *misc2_m =
3156                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3157         void *misc2_v =
3158                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3159
3160         meta_m = (const void *)item->mask;
3161         if (!meta_m)
3162                 meta_m = &rte_flow_item_meta_mask;
3163         meta_v = (const void *)item->spec;
3164         if (meta_v) {
3165                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3166                          rte_be_to_cpu_32(meta_m->data));
3167                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3168                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
3169         }
3170 }
3171
3172 /**
3173  * Add source vport match to the specified matcher.
3174  *
3175  * @param[in, out] matcher
3176  *   Flow matcher.
3177  * @param[in, out] key
3178  *   Flow matcher value.
3179  * @param[in] port
3180  *   Source vport value to match
3181  * @param[in] mask
3182  *   Mask
3183  */
static void
flow_dv_translate_item_source_vport(void *matcher, void *key,
				    int16_t port, uint16_t mask)
{
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

	/* Source vport goes into the misc parameters: mask then value. */
	MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
	MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
}
3194
3195 /**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
3200  * @param[in, out] matcher
3201  *   Flow matcher.
3202  * @param[in, out] key
3203  *   Flow matcher value.
3204  * @param[in] item
3205  *   Flow pattern to translate.
3206  *
3207  * @return
3208  *   0 on success, a negative errno value otherwise.
3209  */
3210 static int
3211 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3212                                void *key, const struct rte_flow_item *item)
3213 {
3214         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3215         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3216         uint16_t mask, val, id;
3217         int ret;
3218
3219         mask = pid_m ? pid_m->id : 0xffff;
3220         id = pid_v ? pid_v->id : dev->data->port_id;
3221         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3222         if (ret)
3223                 return ret;
3224         flow_dv_translate_item_source_vport(matcher, key, val, mask);
3225         return 0;
3226 }
3227
/* All-zero reference buffer used to detect empty match criteria. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluate to true when the given headers section is entirely zero. */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3233
3234 /**
3235  * Calculate flow matcher enable bitmap.
3236  *
3237  * @param match_criteria
3238  *   Pointer to flow matcher criteria.
3239  *
3240  * @return
3241  *   Bitmap of enabled fields.
3242  */
3243 static uint8_t
3244 flow_dv_matcher_enable(uint32_t *match_criteria)
3245 {
3246         uint8_t match_criteria_enable;
3247
3248         match_criteria_enable =
3249                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3250                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3251         match_criteria_enable |=
3252                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3253                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3254         match_criteria_enable |=
3255                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3256                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3257         match_criteria_enable |=
3258                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3259                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3260 #ifdef HAVE_MLX5DV_DR
3261         match_criteria_enable |=
3262                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3263                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3264 #endif
3265         return match_criteria_enable;
3266 }
3267
3268
3269 /**
3270  * Get a flow table.
3271  *
3272  * @param dev[in, out]
3273  *   Pointer to rte_eth_dev structure.
3274  * @param[in] table_id
3275  *   Table id to use.
3276  * @param[in] egress
3277  *   Direction of the table.
3278  * @param[in] transfer
3279  *   E-Switch or NIC flow.
3280  * @param[out] error
3281  *   pointer to error structure.
3282  *
3283  * @return
3284  *   Returns tables resource based on the index, NULL in case of failed.
3285  */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;

#ifdef HAVE_MLX5DV_DR
	/*
	 * With Direct Rules the table object is created lazily in the
	 * matching namespace (FDB for transfer, Tx for egress, Rx
	 * otherwise) and reference-counted.
	 */
	if (transfer) {
		tbl = &sh->fdb_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->fdb_ns, table_id);
	} else if (egress) {
		tbl = &sh->tx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->tx_ns, table_id);
	} else {
		tbl = &sh->rx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->rx_ns, table_id);
	}
	if (!tbl->obj) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create table");
		return NULL;
	}
	/* Each successful get takes a reference on the table. */
	rte_atomic32_inc(&tbl->refcnt);
	return tbl;
#else
	/*
	 * Without DR no table object is created: just hand back the
	 * slot; no reference counting is performed in this build.
	 */
	(void)error;
	(void)tbl;
	if (transfer)
		return &sh->fdb_tbl[table_id];
	else if (egress)
		return &sh->tx_tbl[table_id];
	else
		return &sh->rx_tbl[table_id];
#endif
}
3332
3333 /**
3334  * Release a flow table.
3335  *
3336  * @param[in] tbl
3337  *   Table resource to be released.
3338  *
3339  * @return
3340  *   Returns 0 if table was released, else return 1;
3341  */
3342 static int
3343 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3344 {
3345         if (!tbl)
3346                 return 0;
3347         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3348                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3349                 tbl->obj = NULL;
3350                 return 0;
3351         }
3352         return 1;
3353 }
3354
3355 /**
3356  * Register the flow matcher.
3357  *
3358  * @param dev[in, out]
3359  *   Pointer to rte_eth_dev structure.
3360  * @param[in, out] matcher
3361  *   Pointer to flow matcher.
3362  * @parm[in, out] dev_flow
3363  *   Pointer to the dev_flow.
3364  * @param[out] error
3365  *   pointer to error structure.
3366  *
3367  * @return
3368  *   0 on success otherwise -errno and errno is set.
3369  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;

	/* Lookup from cache. */
	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
		/*
		 * A cache hit requires the same CRC, priority, direction,
		 * group, transfer attribute and an identical mask buffer.
		 */
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    matcher->group == cache_matcher->group &&
		    matcher->transfer == cache_matcher->transfer &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	/* Get (or lazily create) the table the matcher belongs to. */
	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
				       matcher->egress, matcher->transfer,
				       error);
	if (!tbl) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create table");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		/* Table refcounting only exists in DR builds. */
		flow_dv_tbl_resource_release(tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	/*
	 * NOTE(review): flow_dv_tbl_resource_get() already incremented
	 * tbl->refcnt (in DR builds); this extra increment looks like a
	 * possible double reference on the table - TODO confirm intent.
	 */
	rte_atomic32_inc(&tbl->refcnt);
	return 0;
}
3448
3449 /**
3450  * Find existing tag resource or create and register a new one.
3451  *
3452  * @param dev[in, out]
3453  *   Pointer to rte_eth_dev structure.
3454  * @param[in, out] resource
3455  *   Pointer to tag resource.
3456  * @parm[in, out] dev_flow
3457  *   Pointer to the dev_flow.
3458  * @param[out] error
3459  *   pointer to error structure.
3460  *
3461  * @return
3462  *   0 on success otherwise -errno and errno is set.
3463  */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_tag_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_tag_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->tags, next) {
		/* The tag value alone identifies the resource. */
		if (resource->tag == cache_resource->tag) {
			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->flow->tag_resource = cache_resource;
			return 0;
		}
	}
	/* Register new  resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the tag flow action for this tag value. */
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
		(resource->tag);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New resource starts with a single reference held by the flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
	dev_flow->flow->tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
3510
3511 /**
3512  * Release the tag.
3513  *
3514  * @param dev
3515  *   Pointer to Ethernet device.
3516  * @param flow
3517  *   Pointer to mlx5_flow.
3518  *
3519  * @return
3520  *   1 while a reference on it exists, 0 when freed.
3521  */
3522 static int
3523 flow_dv_tag_release(struct rte_eth_dev *dev,
3524                     struct mlx5_flow_dv_tag_resource *tag)
3525 {
3526         assert(tag);
3527         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3528                 dev->data->port_id, (void *)tag,
3529                 rte_atomic32_read(&tag->refcnt));
3530         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3531                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3532                 LIST_REMOVE(tag, next);
3533                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3534                         dev->data->port_id, (void *)tag);
3535                 rte_free(tag);
3536                 return 0;
3537         }
3538         return 1;
3539 }
3540
3541 /**
3542  * Translate port ID action to vport.
3543  *
3544  * @param[in] dev
3545  *   Pointer to rte_eth_dev structure.
3546  * @param[in] action
3547  *   Pointer to the port ID action.
3548  * @param[out] dst_port_id
3549  *   The target port ID.
3550  * @param[out] error
3551  *   Pointer to the error structure.
3552  *
3553  * @return
3554  *   0 on success, a negative errno value otherwise and rte_errno is set.
3555  */
3556 static int
3557 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3558                                  const struct rte_flow_action *action,
3559                                  uint32_t *dst_port_id,
3560                                  struct rte_flow_error *error)
3561 {
3562         uint32_t port;
3563         uint16_t port_id;
3564         int ret;
3565         const struct rte_flow_action_port_id *conf =
3566                         (const struct rte_flow_action_port_id *)action->conf;
3567
3568         port = conf->original ? dev->data->port_id : conf->id;
3569         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3570         if (ret)
3571                 return rte_flow_error_set(error, -ret,
3572                                           RTE_FLOW_ERROR_TYPE_ACTION,
3573                                           NULL,
3574                                           "No eswitch info was found for port");
3575         *dst_port_id = port_id;
3576         return 0;
3577 }
3578
3579 /**
3580  * Fill the flow with DV spec.
3581  *
3582  * @param[in] dev
3583  *   Pointer to rte_eth_dev structure.
3584  * @param[in, out] dev_flow
3585  *   Pointer to the sub flow.
3586  * @param[in] attr
3587  *   Pointer to the flow attributes.
3588  * @param[in] items
3589  *   Pointer to the list of items.
3590  * @param[in] actions
3591  *   Pointer to the list of actions.
3592  * @param[out] error
3593  *   Pointer to the error structure.
3594  *
3595  * @return
3596  *   0 on success, a negative errno value otherwise and rte_errno is set.
3597  */
3598 static int
3599 flow_dv_translate(struct rte_eth_dev *dev,
3600                   struct mlx5_flow *dev_flow,
3601                   const struct rte_flow_attr *attr,
3602                   const struct rte_flow_item items[],
3603                   const struct rte_flow_action actions[],
3604                   struct rte_flow_error *error)
3605 {
3606         struct mlx5_priv *priv = dev->data->dev_private;
3607         struct rte_flow *flow = dev_flow->flow;
3608         uint64_t item_flags = 0;
3609         uint64_t last_item = 0;
3610         uint64_t action_flags = 0;
3611         uint64_t priority = attr->priority;
3612         struct mlx5_flow_dv_matcher matcher = {
3613                 .mask = {
3614                         .size = sizeof(matcher.mask.buf),
3615                 },
3616         };
3617         int actions_n = 0;
3618         bool actions_end = false;
3619         struct mlx5_flow_dv_modify_hdr_resource res = {
3620                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3621                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3622         };
3623         union flow_dv_attr flow_attr = { .attr = 0 };
3624         struct mlx5_flow_dv_tag_resource tag_resource;
3625         uint32_t modify_action_position = UINT32_MAX;
3626
3627         if (attr->transfer)
3628                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3629         if (priority == MLX5_FLOW_PRIO_RSVD)
3630                 priority = priv->config.flow_prio - 1;
3631         for (; !actions_end ; actions++) {
3632                 const struct rte_flow_action_queue *queue;
3633                 const struct rte_flow_action_rss *rss;
3634                 const struct rte_flow_action *action = actions;
3635                 const struct rte_flow_action_count *count = action->conf;
3636                 const uint8_t *rss_key;
3637                 const struct rte_flow_action_jump *jump_data;
3638                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3639                 struct mlx5_flow_tbl_resource *tbl;
3640                 uint32_t port_id = 0;
3641                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3642
3643                 switch (actions->type) {
3644                 case RTE_FLOW_ACTION_TYPE_VOID:
3645                         break;
3646                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3647                         if (flow_dv_translate_action_port_id(dev, action,
3648                                                              &port_id, error))
3649                                 return -rte_errno;
3650                         port_id_resource.port_id = port_id;
3651                         if (flow_dv_port_id_action_resource_register
3652                             (dev, &port_id_resource, dev_flow, error))
3653                                 return -rte_errno;
3654                         dev_flow->dv.actions[actions_n++] =
3655                                 dev_flow->dv.port_id_action->action;
3656                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3657                         break;
3658                 case RTE_FLOW_ACTION_TYPE_FLAG:
3659                         tag_resource.tag =
3660                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3661                         if (!flow->tag_resource)
3662                                 if (flow_dv_tag_resource_register
3663                                     (dev, &tag_resource, dev_flow, error))
3664                                         return errno;
3665                         dev_flow->dv.actions[actions_n++] =
3666                                 flow->tag_resource->action;
3667                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3668                         break;
3669                 case RTE_FLOW_ACTION_TYPE_MARK:
3670                         tag_resource.tag = mlx5_flow_mark_set
3671                               (((const struct rte_flow_action_mark *)
3672                                (actions->conf))->id);
3673                         if (!flow->tag_resource)
3674                                 if (flow_dv_tag_resource_register
3675                                     (dev, &tag_resource, dev_flow, error))
3676                                         return errno;
3677                         dev_flow->dv.actions[actions_n++] =
3678                                 flow->tag_resource->action;
3679                         action_flags |= MLX5_FLOW_ACTION_MARK;
3680                         break;
3681                 case RTE_FLOW_ACTION_TYPE_DROP:
3682                         action_flags |= MLX5_FLOW_ACTION_DROP;
3683                         break;
3684                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3685                         queue = actions->conf;
3686                         flow->rss.queue_num = 1;
3687                         (*flow->queue)[0] = queue->index;
3688                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3689                         break;
3690                 case RTE_FLOW_ACTION_TYPE_RSS:
3691                         rss = actions->conf;
3692                         if (flow->queue)
3693                                 memcpy((*flow->queue), rss->queue,
3694                                        rss->queue_num * sizeof(uint16_t));
3695                         flow->rss.queue_num = rss->queue_num;
3696                         /* NULL RSS key indicates default RSS key. */
3697                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3698                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3699                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3700                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3701                         flow->rss.level = rss->level;
3702                         action_flags |= MLX5_FLOW_ACTION_RSS;
3703                         break;
3704                 case RTE_FLOW_ACTION_TYPE_COUNT:
3705                         if (!priv->config.devx) {
3706                                 rte_errno = ENOTSUP;
3707                                 goto cnt_err;
3708                         }
3709                         flow->counter = flow_dv_counter_new(dev, count->shared,
3710                                                             count->id);
3711                         if (flow->counter == NULL)
3712                                 goto cnt_err;
3713                         dev_flow->dv.actions[actions_n++] =
3714                                 flow->counter->action;
3715                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3716                         break;
3717 cnt_err:
3718                         if (rte_errno == ENOTSUP)
3719                                 return rte_flow_error_set
3720                                               (error, ENOTSUP,
3721                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3722                                                NULL,
3723                                                "count action not supported");
3724                         else
3725                                 return rte_flow_error_set
3726                                                 (error, rte_errno,
3727                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3728                                                  action,
3729                                                  "cannot create counter"
3730                                                   " object.");
3731                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3732                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3733                         if (flow_dv_create_action_l2_encap(dev, actions,
3734                                                            dev_flow,
3735                                                            attr->transfer,
3736                                                            error))
3737                                 return -rte_errno;
3738                         dev_flow->dv.actions[actions_n++] =
3739                                 dev_flow->dv.encap_decap->verbs_action;
3740                         action_flags |= actions->type ==
3741                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3742                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3743                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3744                         break;
3745                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3746                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3747                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3748                                                            attr->transfer,
3749                                                            error))
3750                                 return -rte_errno;
3751                         dev_flow->dv.actions[actions_n++] =
3752                                 dev_flow->dv.encap_decap->verbs_action;
3753                         action_flags |= actions->type ==
3754                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3755                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3756                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3757                         break;
3758                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3759                         /* Handle encap with preceding decap. */
3760                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3761                                 if (flow_dv_create_action_raw_encap
3762                                         (dev, actions, dev_flow, attr, error))
3763                                         return -rte_errno;
3764                                 dev_flow->dv.actions[actions_n++] =
3765                                         dev_flow->dv.encap_decap->verbs_action;
3766                         } else {
3767                                 /* Handle encap without preceding decap. */
3768                                 if (flow_dv_create_action_l2_encap
3769                                     (dev, actions, dev_flow, attr->transfer,
3770                                      error))
3771                                         return -rte_errno;
3772                                 dev_flow->dv.actions[actions_n++] =
3773                                         dev_flow->dv.encap_decap->verbs_action;
3774                         }
3775                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3776                         break;
3777                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3778                         /* Check if this decap is followed by encap. */
3779                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3780                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3781                                action++) {
3782                         }
3783                         /* Handle decap only if it isn't followed by encap. */
3784                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3785                                 if (flow_dv_create_action_l2_decap
3786                                     (dev, dev_flow, attr->transfer, error))
3787                                         return -rte_errno;
3788                                 dev_flow->dv.actions[actions_n++] =
3789                                         dev_flow->dv.encap_decap->verbs_action;
3790                         }
3791                         /* If decap is followed by encap, handle it at encap. */
3792                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3793                         break;
3794                 case RTE_FLOW_ACTION_TYPE_JUMP:
3795                         jump_data = action->conf;
3796                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3797                                                        MLX5_GROUP_FACTOR,
3798                                                        attr->egress,
3799                                                        attr->transfer, error);
3800                         if (!tbl)
3801                                 return rte_flow_error_set
3802                                                 (error, errno,
3803                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3804                                                  NULL,
3805                                                  "cannot create jump action.");
3806                         jump_tbl_resource.tbl = tbl;
3807                         if (flow_dv_jump_tbl_resource_register
3808                             (dev, &jump_tbl_resource, dev_flow, error)) {
3809                                 flow_dv_tbl_resource_release(tbl);
3810                                 return rte_flow_error_set
3811                                                 (error, errno,
3812                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3813                                                  NULL,
3814                                                  "cannot create jump action.");
3815                         }
3816                         dev_flow->dv.actions[actions_n++] =
3817                                 dev_flow->dv.jump->action;
3818                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3819                         break;
3820                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3821                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3822                         if (flow_dv_convert_action_modify_mac(&res, actions,
3823                                                               error))
3824                                 return -rte_errno;
3825                         action_flags |= actions->type ==
3826                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3827                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3828                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3829                         break;
3830                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3831                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3832                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3833                                                                error))
3834                                 return -rte_errno;
3835                         action_flags |= actions->type ==
3836                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3837                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3838                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3839                         break;
3840                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3841                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3842                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3843                                                                error))
3844                                 return -rte_errno;
3845                         action_flags |= actions->type ==
3846                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3847                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3848                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3849                         break;
3850                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3851                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3852                         if (flow_dv_convert_action_modify_tp(&res, actions,
3853                                                              items, &flow_attr,
3854                                                              error))
3855                                 return -rte_errno;
3856                         action_flags |= actions->type ==
3857                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3858                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3859                                         MLX5_FLOW_ACTION_SET_TP_DST;
3860                         break;
3861                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3862                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3863                                                                   &flow_attr,
3864                                                                   error))
3865                                 return -rte_errno;
3866                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3867                         break;
3868                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3869                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3870                                                              items, &flow_attr,
3871                                                              error))
3872                                 return -rte_errno;
3873                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3874                         break;
3875                 case RTE_FLOW_ACTION_TYPE_END:
3876                         actions_end = true;
3877                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3878                                 /* create modify action if needed. */
3879                                 if (flow_dv_modify_hdr_resource_register
3880                                                                 (dev, &res,
3881                                                                  dev_flow,
3882                                                                  error))
3883                                         return -rte_errno;
3884                                 dev_flow->dv.actions[modify_action_position] =
3885                                         dev_flow->dv.modify_hdr->verbs_action;
3886                         }
3887                         break;
3888                 default:
3889                         break;
3890                 }
3891                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3892                     modify_action_position == UINT32_MAX)
3893                         modify_action_position = actions_n++;
3894         }
3895         dev_flow->dv.actions_n = actions_n;
3896         flow->actions = action_flags;
3897         if (attr->ingress && !attr->transfer &&
3898             (priv->representor || priv->master)) {
3899                 /* It was validated - we support unidirection flows only. */
3900                 assert(!attr->egress);
3901                 /*
3902                  * Add matching on source vport index only
3903                  * for ingress rules in E-Switch configurations.
3904                  */
3905                 flow_dv_translate_item_source_vport(matcher.mask.buf,
3906                                                     dev_flow->dv.value.buf,
3907                                                     priv->vport_id,
3908                                                     0xffff);
3909         }
3910         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3911                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3912                 void *match_mask = matcher.mask.buf;
3913                 void *match_value = dev_flow->dv.value.buf;
3914
3915                 switch (items->type) {
3916                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3917                         flow_dv_translate_item_port_id(dev, match_mask,
3918                                                        match_value, items);
3919                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3920                         break;
3921                 case RTE_FLOW_ITEM_TYPE_ETH:
3922                         flow_dv_translate_item_eth(match_mask, match_value,
3923                                                    items, tunnel);
3924                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3925                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3926                                              MLX5_FLOW_LAYER_OUTER_L2;
3927                         break;
3928                 case RTE_FLOW_ITEM_TYPE_VLAN:
3929                         flow_dv_translate_item_vlan(match_mask, match_value,
3930                                                     items, tunnel);
3931                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3932                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3933                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3934                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3935                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3936                         break;
3937                 case RTE_FLOW_ITEM_TYPE_IPV4:
3938                         flow_dv_translate_item_ipv4(match_mask, match_value,
3939                                                     items, tunnel, attr->group);
3940                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3941                         dev_flow->dv.hash_fields |=
3942                                 mlx5_flow_hashfields_adjust
3943                                         (dev_flow, tunnel,
3944                                          MLX5_IPV4_LAYER_TYPES,
3945                                          MLX5_IPV4_IBV_RX_HASH);
3946                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3947                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3948                         break;
3949                 case RTE_FLOW_ITEM_TYPE_IPV6:
3950                         flow_dv_translate_item_ipv6(match_mask, match_value,
3951                                                     items, tunnel, attr->group);
3952                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3953                         dev_flow->dv.hash_fields |=
3954                                 mlx5_flow_hashfields_adjust
3955                                         (dev_flow, tunnel,
3956                                          MLX5_IPV6_LAYER_TYPES,
3957                                          MLX5_IPV6_IBV_RX_HASH);
3958                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3959                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3960                         break;
3961                 case RTE_FLOW_ITEM_TYPE_TCP:
3962                         flow_dv_translate_item_tcp(match_mask, match_value,
3963                                                    items, tunnel);
3964                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3965                         dev_flow->dv.hash_fields |=
3966                                 mlx5_flow_hashfields_adjust
3967                                         (dev_flow, tunnel, ETH_RSS_TCP,
3968                                          IBV_RX_HASH_SRC_PORT_TCP |
3969                                          IBV_RX_HASH_DST_PORT_TCP);
3970                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3971                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3972                         break;
3973                 case RTE_FLOW_ITEM_TYPE_UDP:
3974                         flow_dv_translate_item_udp(match_mask, match_value,
3975                                                    items, tunnel);
3976                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3977                         dev_flow->dv.hash_fields |=
3978                                 mlx5_flow_hashfields_adjust
3979                                         (dev_flow, tunnel, ETH_RSS_UDP,
3980                                          IBV_RX_HASH_SRC_PORT_UDP |
3981                                          IBV_RX_HASH_DST_PORT_UDP);
3982                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3983                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3984                         break;
3985                 case RTE_FLOW_ITEM_TYPE_GRE:
3986                         flow_dv_translate_item_gre(match_mask, match_value,
3987                                                    items, tunnel);
3988                         last_item = MLX5_FLOW_LAYER_GRE;
3989                         break;
3990                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3991                         flow_dv_translate_item_nvgre(match_mask, match_value,
3992                                                      items, tunnel);
3993                         last_item = MLX5_FLOW_LAYER_GRE;
3994                         break;
3995                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3996                         flow_dv_translate_item_vxlan(match_mask, match_value,
3997                                                      items, tunnel);
3998                         last_item = MLX5_FLOW_LAYER_VXLAN;
3999                         break;
4000                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4001                         flow_dv_translate_item_vxlan(match_mask, match_value,
4002                                                      items, tunnel);
4003                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4004                         break;
4005                 case RTE_FLOW_ITEM_TYPE_MPLS:
4006                         flow_dv_translate_item_mpls(match_mask, match_value,
4007                                                     items, last_item, tunnel);
4008                         last_item = MLX5_FLOW_LAYER_MPLS;
4009                         break;
4010                 case RTE_FLOW_ITEM_TYPE_META:
4011                         flow_dv_translate_item_meta(match_mask, match_value,
4012                                                     items);
4013                         last_item = MLX5_FLOW_ITEM_METADATA;
4014                         break;
4015                 default:
4016                         break;
4017                 }
4018                 item_flags |= last_item;
4019         }
4020         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4021                                          dev_flow->dv.value.buf));
4022         dev_flow->layers = item_flags;
4023         /* Register matcher. */
4024         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4025                                     matcher.mask.size);
4026         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4027                                                      matcher.priority);
4028         matcher.egress = attr->egress;
4029         matcher.group = attr->group;
4030         matcher.transfer = attr->transfer;
4031         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4032                 return -rte_errno;
4033         return 0;
4034 }
4035
4036 /**
4037  * Apply the flow to the NIC.
4038  *
4039  * @param[in] dev
4040  *   Pointer to the Ethernet device structure.
4041  * @param[in, out] flow
4042  *   Pointer to flow structure.
4043  * @param[out] error
4044  *   Pointer to error structure.
4045  *
4046  * @return
4047  *   0 on success, a negative errno value otherwise and rte_errno is set.
4048  */
4049 static int
4050 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
4051               struct rte_flow_error *error)
4052 {
4053         struct mlx5_flow_dv *dv;
4054         struct mlx5_flow *dev_flow;
4055         struct mlx5_priv *priv = dev->data->dev_private;
4056         int n;
4057         int err;
4058
4059         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4060                 dv = &dev_flow->dv;
4061                 n = dv->actions_n;
4062                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
4063                         if (flow->transfer) {
4064                                 dv->actions[n++] = priv->sh->esw_drop_action;
4065                         } else {
4066                                 dv->hrxq = mlx5_hrxq_drop_new(dev);
4067                                 if (!dv->hrxq) {
4068                                         rte_flow_error_set
4069                                                 (error, errno,
4070                                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4071                                                  NULL,
4072                                                  "cannot get drop hash queue");
4073                                         goto error;
4074                                 }
4075                                 dv->actions[n++] = dv->hrxq->action;
4076                         }
4077                 } else if (flow->actions &
4078                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
4079                         struct mlx5_hrxq *hrxq;
4080
4081                         hrxq = mlx5_hrxq_get(dev, flow->key,
4082                                              MLX5_RSS_HASH_KEY_LEN,
4083                                              dv->hash_fields,
4084                                              (*flow->queue),
4085                                              flow->rss.queue_num);
4086                         if (!hrxq)
4087                                 hrxq = mlx5_hrxq_new
4088                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
4089                                          dv->hash_fields, (*flow->queue),
4090                                          flow->rss.queue_num,
4091                                          !!(dev_flow->layers &
4092                                             MLX5_FLOW_LAYER_TUNNEL));
4093                         if (!hrxq) {
4094                                 rte_flow_error_set
4095                                         (error, rte_errno,
4096                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
4097                                          "cannot get hash queue");
4098                                 goto error;
4099                         }
4100                         dv->hrxq = hrxq;
4101                         dv->actions[n++] = dv->hrxq->action;
4102                 }
4103                 dv->flow =
4104                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
4105                                                   (void *)&dv->value, n,
4106                                                   dv->actions);
4107                 if (!dv->flow) {
4108                         rte_flow_error_set(error, errno,
4109                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4110                                            NULL,
4111                                            "hardware refuses to create flow");
4112                         goto error;
4113                 }
4114         }
4115         return 0;
4116 error:
4117         err = rte_errno; /* Save rte_errno before cleanup. */
4118         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4119                 struct mlx5_flow_dv *dv = &dev_flow->dv;
4120                 if (dv->hrxq) {
4121                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4122                                 mlx5_hrxq_drop_release(dev);
4123                         else
4124                                 mlx5_hrxq_release(dev, dv->hrxq);
4125                         dv->hrxq = NULL;
4126                 }
4127         }
4128         rte_errno = err; /* Restore rte_errno. */
4129         return -rte_errno;
4130 }
4131
4132 /**
4133  * Release the flow matcher.
4134  *
4135  * @param dev
4136  *   Pointer to Ethernet device.
4137  * @param flow
4138  *   Pointer to mlx5_flow.
4139  *
4140  * @return
4141  *   1 while a reference on it exists, 0 when freed.
4142  */
4143 static int
4144 flow_dv_matcher_release(struct rte_eth_dev *dev,
4145                         struct mlx5_flow *flow)
4146 {
4147         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4148         struct mlx5_priv *priv = dev->data->dev_private;
4149         struct mlx5_ibv_shared *sh = priv->sh;
4150         struct mlx5_flow_tbl_resource *tbl;
4151
4152         assert(matcher->matcher_object);
4153         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4154                 dev->data->port_id, (void *)matcher,
4155                 rte_atomic32_read(&matcher->refcnt));
4156         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4157                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4158                            (matcher->matcher_object));
4159                 LIST_REMOVE(matcher, next);
4160                 if (matcher->egress)
4161                         tbl = &sh->tx_tbl[matcher->group];
4162                 else
4163                         tbl = &sh->rx_tbl[matcher->group];
4164                 flow_dv_tbl_resource_release(tbl);
4165                 rte_free(matcher);
4166                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4167                         dev->data->port_id, (void *)matcher);
4168                 return 0;
4169         }
4170         return 1;
4171 }
4172
4173 /**
4174  * Release an encap/decap resource.
4175  *
4176  * @param flow
4177  *   Pointer to mlx5_flow.
4178  *
4179  * @return
4180  *   1 while a reference on it exists, 0 when freed.
4181  */
4182 static int
4183 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4184 {
4185         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4186                                                 flow->dv.encap_decap;
4187
4188         assert(cache_resource->verbs_action);
4189         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4190                 (void *)cache_resource,
4191                 rte_atomic32_read(&cache_resource->refcnt));
4192         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4193                 claim_zero(mlx5_glue->destroy_flow_action
4194                                 (cache_resource->verbs_action));
4195                 LIST_REMOVE(cache_resource, next);
4196                 rte_free(cache_resource);
4197                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4198                         (void *)cache_resource);
4199                 return 0;
4200         }
4201         return 1;
4202 }
4203
4204 /**
4205  * Release an jump to table action resource.
4206  *
4207  * @param flow
4208  *   Pointer to mlx5_flow.
4209  *
4210  * @return
4211  *   1 while a reference on it exists, 0 when freed.
4212  */
4213 static int
4214 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4215 {
4216         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4217                                                 flow->dv.jump;
4218
4219         assert(cache_resource->action);
4220         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4221                 (void *)cache_resource,
4222                 rte_atomic32_read(&cache_resource->refcnt));
4223         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4224                 claim_zero(mlx5_glue->destroy_flow_action
4225                                 (cache_resource->action));
4226                 LIST_REMOVE(cache_resource, next);
4227                 flow_dv_tbl_resource_release(cache_resource->tbl);
4228                 rte_free(cache_resource);
4229                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4230                         (void *)cache_resource);
4231                 return 0;
4232         }
4233         return 1;
4234 }
4235
4236 /**
4237  * Release a modify-header resource.
4238  *
4239  * @param flow
4240  *   Pointer to mlx5_flow.
4241  *
4242  * @return
4243  *   1 while a reference on it exists, 0 when freed.
4244  */
4245 static int
4246 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4247 {
4248         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4249                                                 flow->dv.modify_hdr;
4250
4251         assert(cache_resource->verbs_action);
4252         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4253                 (void *)cache_resource,
4254                 rte_atomic32_read(&cache_resource->refcnt));
4255         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4256                 claim_zero(mlx5_glue->destroy_flow_action
4257                                 (cache_resource->verbs_action));
4258                 LIST_REMOVE(cache_resource, next);
4259                 rte_free(cache_resource);
4260                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4261                         (void *)cache_resource);
4262                 return 0;
4263         }
4264         return 1;
4265 }
4266
4267 /**
4268  * Release port ID action resource.
4269  *
4270  * @param flow
4271  *   Pointer to mlx5_flow.
4272  *
4273  * @return
4274  *   1 while a reference on it exists, 0 when freed.
4275  */
4276 static int
4277 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4278 {
4279         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4280                 flow->dv.port_id_action;
4281
4282         assert(cache_resource->action);
4283         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4284                 (void *)cache_resource,
4285                 rte_atomic32_read(&cache_resource->refcnt));
4286         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4287                 claim_zero(mlx5_glue->destroy_flow_action
4288                                 (cache_resource->action));
4289                 LIST_REMOVE(cache_resource, next);
4290                 rte_free(cache_resource);
4291                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4292                         (void *)cache_resource);
4293                 return 0;
4294         }
4295         return 1;
4296 }
4297
4298 /**
4299  * Remove the flow from the NIC but keeps it in memory.
4300  *
4301  * @param[in] dev
4302  *   Pointer to Ethernet device.
4303  * @param[in, out] flow
4304  *   Pointer to flow structure.
4305  */
4306 static void
4307 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4308 {
4309         struct mlx5_flow_dv *dv;
4310         struct mlx5_flow *dev_flow;
4311
4312         if (!flow)
4313                 return;
4314         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4315                 dv = &dev_flow->dv;
4316                 if (dv->flow) {
4317                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4318                         dv->flow = NULL;
4319                 }
4320                 if (dv->hrxq) {
4321                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4322                                 mlx5_hrxq_drop_release(dev);
4323                         else
4324                                 mlx5_hrxq_release(dev, dv->hrxq);
4325                         dv->hrxq = NULL;
4326                 }
4327         }
4328 }
4329
4330 /**
4331  * Remove the flow from the NIC and the memory.
4332  *
4333  * @param[in] dev
4334  *   Pointer to the Ethernet device structure.
4335  * @param[in, out] flow
4336  *   Pointer to flow structure.
4337  */
4338 static void
4339 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4340 {
4341         struct mlx5_flow *dev_flow;
4342
4343         if (!flow)
4344                 return;
4345         flow_dv_remove(dev, flow);
4346         if (flow->counter) {
4347                 flow_dv_counter_release(flow->counter);
4348                 flow->counter = NULL;
4349         }
4350         if (flow->tag_resource) {
4351                 flow_dv_tag_release(dev, flow->tag_resource);
4352                 flow->tag_resource = NULL;
4353         }
4354         while (!LIST_EMPTY(&flow->dev_flows)) {
4355                 dev_flow = LIST_FIRST(&flow->dev_flows);
4356                 LIST_REMOVE(dev_flow, next);
4357                 if (dev_flow->dv.matcher)
4358                         flow_dv_matcher_release(dev, dev_flow);
4359                 if (dev_flow->dv.encap_decap)
4360                         flow_dv_encap_decap_resource_release(dev_flow);
4361                 if (dev_flow->dv.modify_hdr)
4362                         flow_dv_modify_hdr_resource_release(dev_flow);
4363                 if (dev_flow->dv.jump)
4364                         flow_dv_jump_tbl_resource_release(dev_flow);
4365                 if (dev_flow->dv.port_id_action)
4366                         flow_dv_port_id_action_resource_release(dev_flow);
4367                 rte_free(dev_flow);
4368         }
4369 }
4370
4371 /**
4372  * Query a dv flow  rule for its statistics via devx.
4373  *
4374  * @param[in] dev
4375  *   Pointer to Ethernet device.
4376  * @param[in] flow
4377  *   Pointer to the sub flow.
4378  * @param[out] data
4379  *   data retrieved by the query.
4380  * @param[out] error
4381  *   Perform verbose error reporting if not NULL.
4382  *
4383  * @return
4384  *   0 on success, a negative errno value otherwise and rte_errno is set.
4385  */
4386 static int
4387 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4388                     void *data, struct rte_flow_error *error)
4389 {
4390         struct mlx5_priv *priv = dev->data->dev_private;
4391         struct rte_flow_query_count *qc = data;
4392         uint64_t pkts = 0;
4393         uint64_t bytes = 0;
4394         int err;
4395
4396         if (!priv->config.devx)
4397                 return rte_flow_error_set(error, ENOTSUP,
4398                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4399                                           NULL,
4400                                           "counters are not supported");
4401         if (flow->counter) {
4402                 err = mlx5_devx_cmd_flow_counter_query
4403                                                 (flow->counter->dcs,
4404                                                  qc->reset, &pkts, &bytes);
4405                 if (err)
4406                         return rte_flow_error_set
4407                                 (error, err,
4408                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4409                                  NULL,
4410                                  "cannot read counters");
4411                 qc->hits_set = 1;
4412                 qc->bytes_set = 1;
4413                 qc->hits = pkts - flow->counter->hits;
4414                 qc->bytes = bytes - flow->counter->bytes;
4415                 if (qc->reset) {
4416                         flow->counter->hits = pkts;
4417                         flow->counter->bytes = bytes;
4418                 }
4419                 return 0;
4420         }
4421         return rte_flow_error_set(error, EINVAL,
4422                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4423                                   NULL,
4424                                   "counters are not available");
4425 }
4426
4427 /**
4428  * Query a flow.
4429  *
4430  * @see rte_flow_query()
4431  * @see rte_flow_ops
4432  */
4433 static int
4434 flow_dv_query(struct rte_eth_dev *dev,
4435               struct rte_flow *flow __rte_unused,
4436               const struct rte_flow_action *actions __rte_unused,
4437               void *data __rte_unused,
4438               struct rte_flow_error *error __rte_unused)
4439 {
4440         int ret = -EINVAL;
4441
4442         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4443                 switch (actions->type) {
4444                 case RTE_FLOW_ACTION_TYPE_VOID:
4445                         break;
4446                 case RTE_FLOW_ACTION_TYPE_COUNT:
4447                         ret = flow_dv_query_count(dev, flow, data, error);
4448                         break;
4449                 default:
4450                         return rte_flow_error_set(error, ENOTSUP,
4451                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4452                                                   actions,
4453                                                   "action not supported");
4454                 }
4455         }
4456         return ret;
4457 }
4458
/*
 * Mutex-protected thunk to flow_dv_translate().
 *
 * Serializes translation against other users of the shared IB context
 * (resource caches are shared between ports of the same device).
 */
static int
flow_d_translate(struct rte_eth_dev *dev,
		 struct mlx5_flow *dev_flow,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
	flow_d_shared_unlock(dev);
	return ret;
}
4477
/*
 * Mutex-protected thunk to flow_dv_apply().
 *
 * Serializes hardware rule creation against other users of the shared
 * IB context.
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int ret;

	flow_d_shared_lock(dev);
	ret = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return ret;
}
4493
/*
 * Mutex-protected thunk to flow_dv_remove().
 *
 * Serializes hardware rule removal against other users of the shared
 * IB context.
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
4504
/*
 * Mutex-protected thunk to flow_dv_destroy().
 *
 * Serializes flow destruction and cached-resource release against other
 * users of the shared IB context.
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
4515
/*
 * Flow driver operations for the Direct Verbs (DV) engine; validate and
 * prepare run lockless, the mutating callbacks go through the
 * mutex-protected flow_d_* thunks.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
4525
4526 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */