net: add rte prefix to ether defines
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
45 #endif
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
50 #endif
51
/* Flow layer flags deduced from pattern items; filled by flow_dv_attr_init(). */
union flow_dv_attr {
        struct {
                uint32_t valid:1; /* Set once the flags below are computed. */
                uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
                uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
                uint32_t tcp:1; /* Pattern contains a TCP item. */
                uint32_t udp:1; /* Pattern contains a UDP item. */
                uint32_t reserved:27; /* Pads the bit-field to 32 bits. */
        };
        uint32_t attr; /* All flags accessed as one 32-bit word. */
};
63
64 /**
65  * Initialize flow attributes structure according to flow items' types.
66  *
67  * @param[in] item
68  *   Pointer to item specification.
69  * @param[out] attr
70  *   Pointer to flow attributes structure.
71  */
72 static void
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
74 {
75         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
76                 switch (item->type) {
77                 case RTE_FLOW_ITEM_TYPE_IPV4:
78                         attr->ipv4 = 1;
79                         break;
80                 case RTE_FLOW_ITEM_TYPE_IPV6:
81                         attr->ipv6 = 1;
82                         break;
83                 case RTE_FLOW_ITEM_TYPE_UDP:
84                         attr->udp = 1;
85                         break;
86                 case RTE_FLOW_ITEM_TYPE_TCP:
87                         attr->tcp = 1;
88                         break;
89                 default:
90                         break;
91                 }
92         }
93         attr->valid = 1;
94 }
95
/* Describes one protocol-header field a modify-header command can touch. */
struct field_modify_info {
        uint32_t size; /* Size of field in protocol header, in bytes. */
        uint32_t offset; /* Offset of field in protocol header, in bytes. */
        enum mlx5_modification_field id; /* Field identifier (see mlx5_prm.h). */
};
101
/*
 * Modifiable Ethernet header fields: destination MAC (bytes 0-5) then
 * source MAC (bytes 6-11), each split into a 4-byte and a 2-byte segment
 * to match the hardware's 47_16/15_0 register pair.
 */
struct field_modify_info modify_eth[] = {
        {4,  0, MLX5_MODI_OUT_DMAC_47_16},
        {2,  4, MLX5_MODI_OUT_DMAC_15_0},
        {4,  6, MLX5_MODI_OUT_SMAC_47_16},
        {2, 10, MLX5_MODI_OUT_SMAC_15_0},
        {0, 0, 0}, /* size == 0 terminates the list. */
};
109
/* Modifiable IPv4 header fields: TTL (offset 8), source and destination. */
struct field_modify_info modify_ipv4[] = {
        {1,  8, MLX5_MODI_OUT_IPV4_TTL},
        {4, 12, MLX5_MODI_OUT_SIPV4},
        {4, 16, MLX5_MODI_OUT_DIPV4},
        {0, 0, 0}, /* size == 0 terminates the list. */
};
116
/*
 * Modifiable IPv6 header fields: hop limit (offset 7) and the two
 * 128-bit addresses, each split into four 32-bit segments.
 */
struct field_modify_info modify_ipv6[] = {
        {1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
        {4,  8, MLX5_MODI_OUT_SIPV6_127_96},
        {4, 12, MLX5_MODI_OUT_SIPV6_95_64},
        {4, 16, MLX5_MODI_OUT_SIPV6_63_32},
        {4, 20, MLX5_MODI_OUT_SIPV6_31_0},
        {4, 24, MLX5_MODI_OUT_DIPV6_127_96},
        {4, 28, MLX5_MODI_OUT_DIPV6_95_64},
        {4, 32, MLX5_MODI_OUT_DIPV6_63_32},
        {4, 36, MLX5_MODI_OUT_DIPV6_31_0},
        {0, 0, 0}, /* size == 0 terminates the list. */
};
129
/* Modifiable UDP header fields: source and destination ports. */
struct field_modify_info modify_udp[] = {
        {2, 0, MLX5_MODI_OUT_UDP_SPORT},
        {2, 2, MLX5_MODI_OUT_UDP_DPORT},
        {0, 0, 0}, /* size == 0 terminates the list. */
};
135
/* Modifiable TCP header fields: source and destination ports. */
struct field_modify_info modify_tcp[] = {
        {2, 0, MLX5_MODI_OUT_TCP_SPORT},
        {2, 2, MLX5_MODI_OUT_TCP_DPORT},
        {0, 0, 0}, /* size == 0 terminates the list. */
};
141
142 /**
143  * Acquire the synchronizing object to protect multithreaded access
144  * to shared dv context. Lock occurs only if context is actually
145  * shared, i.e. we have multiport IB device and representors are
146  * created.
147  *
148  * @param[in] dev
149  *   Pointer to the rte_eth_dev structure.
150  */
151 static void
152 flow_d_shared_lock(struct rte_eth_dev *dev)
153 {
154         struct mlx5_priv *priv = dev->data->dev_private;
155         struct mlx5_ibv_shared *sh = priv->sh;
156
157         if (sh->dv_refcnt > 1) {
158                 int ret;
159
160                 ret = pthread_mutex_lock(&sh->dv_mutex);
161                 assert(!ret);
162                 (void)ret;
163         }
164 }
165
166 static void
167 flow_d_shared_unlock(struct rte_eth_dev *dev)
168 {
169         struct mlx5_priv *priv = dev->data->dev_private;
170         struct mlx5_ibv_shared *sh = priv->sh;
171
172         if (sh->dv_refcnt > 1) {
173                 int ret;
174
175                 ret = pthread_mutex_unlock(&sh->dv_mutex);
176                 assert(!ret);
177                 (void)ret;
178         }
179 }
180
/**
 * Convert modify-header action to DV specification.
 *
 * Walks the field table and, for every field whose mask segment is
 * non-zero, appends one hardware modification command to the resource.
 *
 * @param[in] item
 *   Pointer to item specification; spec holds the values to write,
 *   mask selects which fields generate commands.
 * @param[in] field
 *   Pointer to field modification information table, terminated by a
 *   zero-size entry.
 * @param[in,out] resource
 *   Pointer to the modify-header resource; commands are appended
 *   starting at resource->actions_num.
 * @param[in] type
 *   Type of modification (MLX5_MODIFICATION_TYPE_SET/ADD).
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
                              struct field_modify_info *field,
                              struct mlx5_flow_dv_modify_hdr_resource *resource,
                              uint32_t type,
                              struct rte_flow_error *error)
{
        uint32_t i = resource->actions_num;
        struct mlx5_modification_cmd *actions = resource->actions;
        const uint8_t *spec = item->spec;
        const uint8_t *mask = item->mask;
        uint32_t set;

        while (field->size) {
                set = 0;
                /* Generate modify command for each mask segment. */
                memcpy(&set, &mask[field->offset], field->size);
                if (set) {
                        if (i >= MLX5_MODIFY_NUM)
                                return rte_flow_error_set(error, EINVAL,
                                         RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                         "too many items to modify");
                        actions[i].action_type = type;
                        actions[i].field = field->id;
                        /*
                         * Length is in bits; 0 appears to encode a full
                         * 32-bit field here (PRM convention) -- confirm
                         * against the PRM if changing this.
                         */
                        actions[i].length = field->size ==
                                        4 ? 0 : field->size * 8;
                        /* Right-align sub-word values within data[]. */
                        rte_memcpy(&actions[i].data[4 - field->size],
                                   &spec[field->offset], field->size);
                        actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
                        ++i;
                }
                /* Publish progress so partial commands are counted. */
                if (resource->actions_num != i)
                        resource->actions_num = i;
                field++;
        }
        /* An all-zero mask produced no commands: nothing to modify. */
        if (!resource->actions_num)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "invalid modification flow item");
        return 0;
}
239
240 /**
241  * Convert modify-header set IPv4 address action to DV specification.
242  *
243  * @param[in,out] resource
244  *   Pointer to the modify-header resource.
245  * @param[in] action
246  *   Pointer to action specification.
247  * @param[out] error
248  *   Pointer to the error structure.
249  *
250  * @return
251  *   0 on success, a negative errno value otherwise and rte_errno is set.
252  */
253 static int
254 flow_dv_convert_action_modify_ipv4
255                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
256                          const struct rte_flow_action *action,
257                          struct rte_flow_error *error)
258 {
259         const struct rte_flow_action_set_ipv4 *conf =
260                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
261         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
262         struct rte_flow_item_ipv4 ipv4;
263         struct rte_flow_item_ipv4 ipv4_mask;
264
265         memset(&ipv4, 0, sizeof(ipv4));
266         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
267         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
268                 ipv4.hdr.src_addr = conf->ipv4_addr;
269                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
270         } else {
271                 ipv4.hdr.dst_addr = conf->ipv4_addr;
272                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
273         }
274         item.spec = &ipv4;
275         item.mask = &ipv4_mask;
276         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
277                                              MLX5_MODIFICATION_TYPE_SET, error);
278 }
279
280 /**
281  * Convert modify-header set IPv6 address action to DV specification.
282  *
283  * @param[in,out] resource
284  *   Pointer to the modify-header resource.
285  * @param[in] action
286  *   Pointer to action specification.
287  * @param[out] error
288  *   Pointer to the error structure.
289  *
290  * @return
291  *   0 on success, a negative errno value otherwise and rte_errno is set.
292  */
293 static int
294 flow_dv_convert_action_modify_ipv6
295                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
296                          const struct rte_flow_action *action,
297                          struct rte_flow_error *error)
298 {
299         const struct rte_flow_action_set_ipv6 *conf =
300                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
301         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
302         struct rte_flow_item_ipv6 ipv6;
303         struct rte_flow_item_ipv6 ipv6_mask;
304
305         memset(&ipv6, 0, sizeof(ipv6));
306         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
307         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
308                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
309                        sizeof(ipv6.hdr.src_addr));
310                 memcpy(&ipv6_mask.hdr.src_addr,
311                        &rte_flow_item_ipv6_mask.hdr.src_addr,
312                        sizeof(ipv6.hdr.src_addr));
313         } else {
314                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
315                        sizeof(ipv6.hdr.dst_addr));
316                 memcpy(&ipv6_mask.hdr.dst_addr,
317                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
318                        sizeof(ipv6.hdr.dst_addr));
319         }
320         item.spec = &ipv6;
321         item.mask = &ipv6_mask;
322         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
323                                              MLX5_MODIFICATION_TYPE_SET, error);
324 }
325
326 /**
327  * Convert modify-header set MAC address action to DV specification.
328  *
329  * @param[in,out] resource
330  *   Pointer to the modify-header resource.
331  * @param[in] action
332  *   Pointer to action specification.
333  * @param[out] error
334  *   Pointer to the error structure.
335  *
336  * @return
337  *   0 on success, a negative errno value otherwise and rte_errno is set.
338  */
339 static int
340 flow_dv_convert_action_modify_mac
341                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
342                          const struct rte_flow_action *action,
343                          struct rte_flow_error *error)
344 {
345         const struct rte_flow_action_set_mac *conf =
346                 (const struct rte_flow_action_set_mac *)(action->conf);
347         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
348         struct rte_flow_item_eth eth;
349         struct rte_flow_item_eth eth_mask;
350
351         memset(&eth, 0, sizeof(eth));
352         memset(&eth_mask, 0, sizeof(eth_mask));
353         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
354                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
355                        sizeof(eth.src.addr_bytes));
356                 memcpy(&eth_mask.src.addr_bytes,
357                        &rte_flow_item_eth_mask.src.addr_bytes,
358                        sizeof(eth_mask.src.addr_bytes));
359         } else {
360                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
361                        sizeof(eth.dst.addr_bytes));
362                 memcpy(&eth_mask.dst.addr_bytes,
363                        &rte_flow_item_eth_mask.dst.addr_bytes,
364                        sizeof(eth_mask.dst.addr_bytes));
365         }
366         item.spec = &eth;
367         item.mask = &eth_mask;
368         return flow_dv_convert_modify_action(&item, modify_eth, resource,
369                                              MLX5_MODIFICATION_TYPE_SET, error);
370 }
371
372 /**
373  * Convert modify-header set TP action to DV specification.
374  *
375  * @param[in,out] resource
376  *   Pointer to the modify-header resource.
377  * @param[in] action
378  *   Pointer to action specification.
379  * @param[in] items
380  *   Pointer to rte_flow_item objects list.
381  * @param[in] attr
382  *   Pointer to flow attributes structure.
383  * @param[out] error
384  *   Pointer to the error structure.
385  *
386  * @return
387  *   0 on success, a negative errno value otherwise and rte_errno is set.
388  */
389 static int
390 flow_dv_convert_action_modify_tp
391                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
392                          const struct rte_flow_action *action,
393                          const struct rte_flow_item *items,
394                          union flow_dv_attr *attr,
395                          struct rte_flow_error *error)
396 {
397         const struct rte_flow_action_set_tp *conf =
398                 (const struct rte_flow_action_set_tp *)(action->conf);
399         struct rte_flow_item item;
400         struct rte_flow_item_udp udp;
401         struct rte_flow_item_udp udp_mask;
402         struct rte_flow_item_tcp tcp;
403         struct rte_flow_item_tcp tcp_mask;
404         struct field_modify_info *field;
405
406         if (!attr->valid)
407                 flow_dv_attr_init(items, attr);
408         if (attr->udp) {
409                 memset(&udp, 0, sizeof(udp));
410                 memset(&udp_mask, 0, sizeof(udp_mask));
411                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
412                         udp.hdr.src_port = conf->port;
413                         udp_mask.hdr.src_port =
414                                         rte_flow_item_udp_mask.hdr.src_port;
415                 } else {
416                         udp.hdr.dst_port = conf->port;
417                         udp_mask.hdr.dst_port =
418                                         rte_flow_item_udp_mask.hdr.dst_port;
419                 }
420                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
421                 item.spec = &udp;
422                 item.mask = &udp_mask;
423                 field = modify_udp;
424         }
425         if (attr->tcp) {
426                 memset(&tcp, 0, sizeof(tcp));
427                 memset(&tcp_mask, 0, sizeof(tcp_mask));
428                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
429                         tcp.hdr.src_port = conf->port;
430                         tcp_mask.hdr.src_port =
431                                         rte_flow_item_tcp_mask.hdr.src_port;
432                 } else {
433                         tcp.hdr.dst_port = conf->port;
434                         tcp_mask.hdr.dst_port =
435                                         rte_flow_item_tcp_mask.hdr.dst_port;
436                 }
437                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
438                 item.spec = &tcp;
439                 item.mask = &tcp_mask;
440                 field = modify_tcp;
441         }
442         return flow_dv_convert_modify_action(&item, field, resource,
443                                              MLX5_MODIFICATION_TYPE_SET, error);
444 }
445
446 /**
447  * Convert modify-header set TTL action to DV specification.
448  *
449  * @param[in,out] resource
450  *   Pointer to the modify-header resource.
451  * @param[in] action
452  *   Pointer to action specification.
453  * @param[in] items
454  *   Pointer to rte_flow_item objects list.
455  * @param[in] attr
456  *   Pointer to flow attributes structure.
457  * @param[out] error
458  *   Pointer to the error structure.
459  *
460  * @return
461  *   0 on success, a negative errno value otherwise and rte_errno is set.
462  */
463 static int
464 flow_dv_convert_action_modify_ttl
465                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
466                          const struct rte_flow_action *action,
467                          const struct rte_flow_item *items,
468                          union flow_dv_attr *attr,
469                          struct rte_flow_error *error)
470 {
471         const struct rte_flow_action_set_ttl *conf =
472                 (const struct rte_flow_action_set_ttl *)(action->conf);
473         struct rte_flow_item item;
474         struct rte_flow_item_ipv4 ipv4;
475         struct rte_flow_item_ipv4 ipv4_mask;
476         struct rte_flow_item_ipv6 ipv6;
477         struct rte_flow_item_ipv6 ipv6_mask;
478         struct field_modify_info *field;
479
480         if (!attr->valid)
481                 flow_dv_attr_init(items, attr);
482         if (attr->ipv4) {
483                 memset(&ipv4, 0, sizeof(ipv4));
484                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
485                 ipv4.hdr.time_to_live = conf->ttl_value;
486                 ipv4_mask.hdr.time_to_live = 0xFF;
487                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
488                 item.spec = &ipv4;
489                 item.mask = &ipv4_mask;
490                 field = modify_ipv4;
491         }
492         if (attr->ipv6) {
493                 memset(&ipv6, 0, sizeof(ipv6));
494                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
495                 ipv6.hdr.hop_limits = conf->ttl_value;
496                 ipv6_mask.hdr.hop_limits = 0xFF;
497                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
498                 item.spec = &ipv6;
499                 item.mask = &ipv6_mask;
500                 field = modify_ipv6;
501         }
502         return flow_dv_convert_modify_action(&item, field, resource,
503                                              MLX5_MODIFICATION_TYPE_SET, error);
504 }
505
506 /**
507  * Convert modify-header decrement TTL action to DV specification.
508  *
509  * @param[in,out] resource
510  *   Pointer to the modify-header resource.
511  * @param[in] action
512  *   Pointer to action specification.
513  * @param[in] items
514  *   Pointer to rte_flow_item objects list.
515  * @param[in] attr
516  *   Pointer to flow attributes structure.
517  * @param[out] error
518  *   Pointer to the error structure.
519  *
520  * @return
521  *   0 on success, a negative errno value otherwise and rte_errno is set.
522  */
523 static int
524 flow_dv_convert_action_modify_dec_ttl
525                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
526                          const struct rte_flow_item *items,
527                          union flow_dv_attr *attr,
528                          struct rte_flow_error *error)
529 {
530         struct rte_flow_item item;
531         struct rte_flow_item_ipv4 ipv4;
532         struct rte_flow_item_ipv4 ipv4_mask;
533         struct rte_flow_item_ipv6 ipv6;
534         struct rte_flow_item_ipv6 ipv6_mask;
535         struct field_modify_info *field;
536
537         if (!attr->valid)
538                 flow_dv_attr_init(items, attr);
539         if (attr->ipv4) {
540                 memset(&ipv4, 0, sizeof(ipv4));
541                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
542                 ipv4.hdr.time_to_live = 0xFF;
543                 ipv4_mask.hdr.time_to_live = 0xFF;
544                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
545                 item.spec = &ipv4;
546                 item.mask = &ipv4_mask;
547                 field = modify_ipv4;
548         }
549         if (attr->ipv6) {
550                 memset(&ipv6, 0, sizeof(ipv6));
551                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
552                 ipv6.hdr.hop_limits = 0xFF;
553                 ipv6_mask.hdr.hop_limits = 0xFF;
554                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
555                 item.spec = &ipv6;
556                 item.mask = &ipv6_mask;
557                 field = modify_ipv6;
558         }
559         return flow_dv_convert_modify_action(&item, field, resource,
560                                              MLX5_MODIFICATION_TYPE_ADD, error);
561 }
562
563 /**
564  * Validate META item.
565  *
566  * @param[in] dev
567  *   Pointer to the rte_eth_dev structure.
568  * @param[in] item
569  *   Item specification.
570  * @param[in] attr
571  *   Attributes of flow that includes this item.
572  * @param[out] error
573  *   Pointer to error structure.
574  *
575  * @return
576  *   0 on success, a negative errno value otherwise and rte_errno is set.
577  */
578 static int
579 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
580                            const struct rte_flow_item *item,
581                            const struct rte_flow_attr *attr,
582                            struct rte_flow_error *error)
583 {
584         const struct rte_flow_item_meta *spec = item->spec;
585         const struct rte_flow_item_meta *mask = item->mask;
586         const struct rte_flow_item_meta nic_mask = {
587                 .data = RTE_BE32(UINT32_MAX)
588         };
589         int ret;
590         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
591
592         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
593                 return rte_flow_error_set(error, EPERM,
594                                           RTE_FLOW_ERROR_TYPE_ITEM,
595                                           NULL,
596                                           "match on metadata offload "
597                                           "configuration is off for this port");
598         if (!spec)
599                 return rte_flow_error_set(error, EINVAL,
600                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
601                                           item->spec,
602                                           "data cannot be empty");
603         if (!spec->data)
604                 return rte_flow_error_set(error, EINVAL,
605                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
606                                           NULL,
607                                           "data cannot be zero");
608         if (!mask)
609                 mask = &rte_flow_item_meta_mask;
610         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
611                                         (const uint8_t *)&nic_mask,
612                                         sizeof(struct rte_flow_item_meta),
613                                         error);
614         if (ret < 0)
615                 return ret;
616         if (attr->ingress)
617                 return rte_flow_error_set(error, ENOTSUP,
618                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
619                                           NULL,
620                                           "pattern not supported for ingress");
621         return 0;
622 }
623
624 /**
625  * Validate vport item.
626  *
627  * @param[in] dev
628  *   Pointer to the rte_eth_dev structure.
629  * @param[in] item
630  *   Item specification.
631  * @param[in] attr
632  *   Attributes of flow that includes this item.
633  * @param[in] item_flags
634  *   Bit-fields that holds the items detected until now.
635  * @param[out] error
636  *   Pointer to error structure.
637  *
638  * @return
639  *   0 on success, a negative errno value otherwise and rte_errno is set.
640  */
641 static int
642 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
643                               const struct rte_flow_item *item,
644                               const struct rte_flow_attr *attr,
645                               uint64_t item_flags,
646                               struct rte_flow_error *error)
647 {
648         const struct rte_flow_item_port_id *spec = item->spec;
649         const struct rte_flow_item_port_id *mask = item->mask;
650         const struct rte_flow_item_port_id switch_mask = {
651                         .id = 0xffffffff,
652         };
653         uint16_t esw_domain_id;
654         uint16_t item_port_esw_domain_id;
655         int ret;
656
657         if (!attr->transfer)
658                 return rte_flow_error_set(error, EINVAL,
659                                           RTE_FLOW_ERROR_TYPE_ITEM,
660                                           NULL,
661                                           "match on port id is valid only"
662                                           " when transfer flag is enabled");
663         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
664                 return rte_flow_error_set(error, ENOTSUP,
665                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
666                                           "multiple source ports are not"
667                                           " supported");
668         if (!mask)
669                 mask = &switch_mask;
670         if (mask->id != 0xffffffff)
671                 return rte_flow_error_set(error, ENOTSUP,
672                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
673                                            mask,
674                                            "no support for partial mask on"
675                                            " \"id\" field");
676         ret = mlx5_flow_item_acceptable
677                                 (item, (const uint8_t *)mask,
678                                  (const uint8_t *)&rte_flow_item_port_id_mask,
679                                  sizeof(struct rte_flow_item_port_id),
680                                  error);
681         if (ret)
682                 return ret;
683         if (!spec)
684                 return 0;
685         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
686                                         NULL);
687         if (ret)
688                 return rte_flow_error_set(error, -ret,
689                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
690                                           "failed to obtain E-Switch info for"
691                                           " port");
692         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
693                                         &esw_domain_id, NULL);
694         if (ret < 0)
695                 return rte_flow_error_set(error, -ret,
696                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
697                                           NULL,
698                                           "failed to obtain E-Switch info");
699         if (item_port_esw_domain_id != esw_domain_id)
700                 return rte_flow_error_set(error, -ret,
701                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
702                                           "cannot match on a port from a"
703                                           " different E-Switch");
704         return 0;
705 }
706
/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
                              struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;

        /* Counters need DevX enabled in the device configuration. */
        if (!priv->config.devx)
                goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
        /* Build-time support present and DevX enabled: action is valid. */
        return 0;
#endif
notsup_err:
        return rte_flow_error_set
                      (error, ENOTSUP,
                       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                       NULL,
                       "count action not supported");
}
736
737 /**
738  * Validate the L2 encap action.
739  *
740  * @param[in] action_flags
741  *   Holds the actions detected until now.
742  * @param[in] action
743  *   Pointer to the encap action.
744  * @param[in] attr
745  *   Pointer to flow attributes
746  * @param[out] error
747  *   Pointer to error structure.
748  *
749  * @return
750  *   0 on success, a negative errno value otherwise and rte_errno is set.
751  */
752 static int
753 flow_dv_validate_action_l2_encap(uint64_t action_flags,
754                                  const struct rte_flow_action *action,
755                                  const struct rte_flow_attr *attr,
756                                  struct rte_flow_error *error)
757 {
758         if (!(action->conf))
759                 return rte_flow_error_set(error, EINVAL,
760                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
761                                           "configuration cannot be null");
762         if (action_flags & MLX5_FLOW_ACTION_DROP)
763                 return rte_flow_error_set(error, EINVAL,
764                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
765                                           "can't drop and encap in same flow");
766         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
767                 return rte_flow_error_set(error, EINVAL,
768                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
769                                           "can only have a single encap or"
770                                           " decap action in a flow");
771         if (!attr->transfer && attr->ingress)
772                 return rte_flow_error_set(error, ENOTSUP,
773                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
774                                           NULL,
775                                           "encap action not supported for "
776                                           "ingress");
777         return 0;
778 }
779
780 /**
781  * Validate the L2 decap action.
782  *
783  * @param[in] action_flags
784  *   Holds the actions detected until now.
785  * @param[in] attr
786  *   Pointer to flow attributes
787  * @param[out] error
788  *   Pointer to error structure.
789  *
790  * @return
791  *   0 on success, a negative errno value otherwise and rte_errno is set.
792  */
793 static int
794 flow_dv_validate_action_l2_decap(uint64_t action_flags,
795                                  const struct rte_flow_attr *attr,
796                                  struct rte_flow_error *error)
797 {
798         if (action_flags & MLX5_FLOW_ACTION_DROP)
799                 return rte_flow_error_set(error, EINVAL,
800                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
801                                           "can't drop and decap in same flow");
802         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
803                 return rte_flow_error_set(error, EINVAL,
804                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
805                                           "can only have a single encap or"
806                                           " decap action in a flow");
807         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
808                 return rte_flow_error_set(error, EINVAL,
809                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
810                                           "can't have decap action after"
811                                           " modify action");
812         if (attr->egress)
813                 return rte_flow_error_set(error, ENOTSUP,
814                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
815                                           NULL,
816                                           "decap action not supported for "
817                                           "egress");
818         return 0;
819 }
820
821 /**
822  * Validate the raw encap action.
823  *
824  * @param[in] action_flags
825  *   Holds the actions detected until now.
826  * @param[in] action
827  *   Pointer to the encap action.
828  * @param[in] attr
829  *   Pointer to flow attributes
830  * @param[out] error
831  *   Pointer to error structure.
832  *
833  * @return
834  *   0 on success, a negative errno value otherwise and rte_errno is set.
835  */
836 static int
837 flow_dv_validate_action_raw_encap(uint64_t action_flags,
838                                   const struct rte_flow_action *action,
839                                   const struct rte_flow_attr *attr,
840                                   struct rte_flow_error *error)
841 {
842         if (!(action->conf))
843                 return rte_flow_error_set(error, EINVAL,
844                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
845                                           "configuration cannot be null");
846         if (action_flags & MLX5_FLOW_ACTION_DROP)
847                 return rte_flow_error_set(error, EINVAL,
848                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
849                                           "can't drop and encap in same flow");
850         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
851                 return rte_flow_error_set(error, EINVAL,
852                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
853                                           "can only have a single encap"
854                                           " action in a flow");
855         /* encap without preceding decap is not supported for ingress */
856         if (!attr->transfer &&  attr->ingress &&
857             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
858                 return rte_flow_error_set(error, ENOTSUP,
859                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
860                                           NULL,
861                                           "encap action not supported for "
862                                           "ingress");
863         return 0;
864 }
865
866 /**
867  * Validate the raw decap action.
868  *
869  * @param[in] action_flags
870  *   Holds the actions detected until now.
871  * @param[in] action
872  *   Pointer to the encap action.
873  * @param[in] attr
874  *   Pointer to flow attributes
875  * @param[out] error
876  *   Pointer to error structure.
877  *
878  * @return
879  *   0 on success, a negative errno value otherwise and rte_errno is set.
880  */
881 static int
882 flow_dv_validate_action_raw_decap(uint64_t action_flags,
883                                   const struct rte_flow_action *action,
884                                   const struct rte_flow_attr *attr,
885                                   struct rte_flow_error *error)
886 {
887         if (action_flags & MLX5_FLOW_ACTION_DROP)
888                 return rte_flow_error_set(error, EINVAL,
889                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
890                                           "can't drop and decap in same flow");
891         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
892                 return rte_flow_error_set(error, EINVAL,
893                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
894                                           "can't have encap action before"
895                                           " decap action");
896         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
897                 return rte_flow_error_set(error, EINVAL,
898                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
899                                           "can only have a single decap"
900                                           " action in a flow");
901         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
902                 return rte_flow_error_set(error, EINVAL,
903                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
904                                           "can't have decap action after"
905                                           " modify action");
906         /* decap action is valid on egress only if it is followed by encap */
907         if (attr->egress) {
908                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
909                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
910                        action++) {
911                 }
912                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
913                         return rte_flow_error_set
914                                         (error, ENOTSUP,
915                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
916                                          NULL, "decap action not supported"
917                                          " for egress");
918         }
919         return 0;
920 }
921
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * The shared IB context keeps a list of encap/decap resources so flows
 * with identical reformat parameters share one action object via a
 * reference count.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow; on success dev_flow->dv.encap_decap points
 *   to the cached or newly created resource.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_domain *domain;

	/*
	 * Flags is 1 only for group 0 — presumably the root-table flag
	 * (cf. MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL == 1); confirm against
	 * rdma-core semantics.
	 */
	resource->flags = flow->group ? 0 : 1;
	/* Pick the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* A decap resource has size 0; pass NULL for the reformat data then. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1003
1004 /**
1005  * Find existing table jump resource or create and register a new one.
1006  *
1007  * @param dev[in, out]
1008  *   Pointer to rte_eth_dev structure.
1009  * @param[in, out] resource
1010  *   Pointer to jump table resource.
1011  * @parm[in, out] dev_flow
1012  *   Pointer to the dev_flow.
1013  * @param[out] error
1014  *   pointer to error structure.
1015  *
1016  * @return
1017  *   0 on success otherwise -errno and errno is set.
1018  */
1019 static int
1020 flow_dv_jump_tbl_resource_register
1021                         (struct rte_eth_dev *dev,
1022                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1023                          struct mlx5_flow *dev_flow,
1024                          struct rte_flow_error *error)
1025 {
1026         struct mlx5_priv *priv = dev->data->dev_private;
1027         struct mlx5_ibv_shared *sh = priv->sh;
1028         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1029
1030         /* Lookup a matching resource from cache. */
1031         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1032                 if (resource->tbl == cache_resource->tbl) {
1033                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1034                                 (void *)cache_resource,
1035                                 rte_atomic32_read(&cache_resource->refcnt));
1036                         rte_atomic32_inc(&cache_resource->refcnt);
1037                         dev_flow->dv.jump = cache_resource;
1038                         return 0;
1039                 }
1040         }
1041         /* Register new jump table resource. */
1042         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1043         if (!cache_resource)
1044                 return rte_flow_error_set(error, ENOMEM,
1045                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1046                                           "cannot allocate resource memory");
1047         *cache_resource = *resource;
1048         cache_resource->action =
1049                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1050                 (resource->tbl->obj);
1051         if (!cache_resource->action) {
1052                 rte_free(cache_resource);
1053                 return rte_flow_error_set(error, ENOMEM,
1054                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1055                                           NULL, "cannot create action");
1056         }
1057         rte_atomic32_init(&cache_resource->refcnt);
1058         rte_atomic32_inc(&cache_resource->refcnt);
1059         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1060         dev_flow->dv.jump = cache_resource;
1061         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1062                 (void *)cache_resource,
1063                 rte_atomic32_read(&cache_resource->refcnt));
1064         return 0;
1065 }
1066
1067 /**
1068  * Find existing table port ID resource or create and register a new one.
1069  *
1070  * @param dev[in, out]
1071  *   Pointer to rte_eth_dev structure.
1072  * @param[in, out] resource
1073  *   Pointer to port ID action resource.
1074  * @parm[in, out] dev_flow
1075  *   Pointer to the dev_flow.
1076  * @param[out] error
1077  *   pointer to error structure.
1078  *
1079  * @return
1080  *   0 on success otherwise -errno and errno is set.
1081  */
1082 static int
1083 flow_dv_port_id_action_resource_register
1084                         (struct rte_eth_dev *dev,
1085                          struct mlx5_flow_dv_port_id_action_resource *resource,
1086                          struct mlx5_flow *dev_flow,
1087                          struct rte_flow_error *error)
1088 {
1089         struct mlx5_priv *priv = dev->data->dev_private;
1090         struct mlx5_ibv_shared *sh = priv->sh;
1091         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1092
1093         /* Lookup a matching resource from cache. */
1094         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1095                 if (resource->port_id == cache_resource->port_id) {
1096                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1097                                 "refcnt %d++",
1098                                 (void *)cache_resource,
1099                                 rte_atomic32_read(&cache_resource->refcnt));
1100                         rte_atomic32_inc(&cache_resource->refcnt);
1101                         dev_flow->dv.port_id_action = cache_resource;
1102                         return 0;
1103                 }
1104         }
1105         /* Register new port id action resource. */
1106         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1107         if (!cache_resource)
1108                 return rte_flow_error_set(error, ENOMEM,
1109                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1110                                           "cannot allocate resource memory");
1111         *cache_resource = *resource;
1112         cache_resource->action =
1113                 mlx5_glue->dr_create_flow_action_dest_vport
1114                         (priv->sh->fdb_domain, resource->port_id);
1115         if (!cache_resource->action) {
1116                 rte_free(cache_resource);
1117                 return rte_flow_error_set(error, ENOMEM,
1118                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1119                                           NULL, "cannot create action");
1120         }
1121         rte_atomic32_init(&cache_resource->refcnt);
1122         rte_atomic32_inc(&cache_resource->refcnt);
1123         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1124         dev_flow->dv.port_id_action = cache_resource;
1125         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1126                 (void *)cache_resource,
1127                 rte_atomic32_read(&cache_resource->refcnt));
1128         return 0;
1129 }
1130
1131 /**
1132  * Get the size of specific rte_flow_item_type
1133  *
1134  * @param[in] item_type
1135  *   Tested rte_flow_item_type.
1136  *
1137  * @return
1138  *   sizeof struct item_type, 0 if void or irrelevant.
1139  */
1140 static size_t
1141 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1142 {
1143         size_t retval;
1144
1145         switch (item_type) {
1146         case RTE_FLOW_ITEM_TYPE_ETH:
1147                 retval = sizeof(struct rte_flow_item_eth);
1148                 break;
1149         case RTE_FLOW_ITEM_TYPE_VLAN:
1150                 retval = sizeof(struct rte_flow_item_vlan);
1151                 break;
1152         case RTE_FLOW_ITEM_TYPE_IPV4:
1153                 retval = sizeof(struct rte_flow_item_ipv4);
1154                 break;
1155         case RTE_FLOW_ITEM_TYPE_IPV6:
1156                 retval = sizeof(struct rte_flow_item_ipv6);
1157                 break;
1158         case RTE_FLOW_ITEM_TYPE_UDP:
1159                 retval = sizeof(struct rte_flow_item_udp);
1160                 break;
1161         case RTE_FLOW_ITEM_TYPE_TCP:
1162                 retval = sizeof(struct rte_flow_item_tcp);
1163                 break;
1164         case RTE_FLOW_ITEM_TYPE_VXLAN:
1165                 retval = sizeof(struct rte_flow_item_vxlan);
1166                 break;
1167         case RTE_FLOW_ITEM_TYPE_GRE:
1168                 retval = sizeof(struct rte_flow_item_gre);
1169                 break;
1170         case RTE_FLOW_ITEM_TYPE_NVGRE:
1171                 retval = sizeof(struct rte_flow_item_nvgre);
1172                 break;
1173         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1174                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1175                 break;
1176         case RTE_FLOW_ITEM_TYPE_MPLS:
1177                 retval = sizeof(struct rte_flow_item_mpls);
1178                 break;
1179         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1180         default:
1181                 retval = 0;
1182                 break;
1183         }
1184         return retval;
1185 }
1186
1187 #define MLX5_ENCAP_IPV4_VERSION         0x40
1188 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1189 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1190 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1191 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1192 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1193 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1194
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Walks the item list, copies each item's spec into @p buf back to back,
 * and fills in sensible defaults for header fields the user left zero
 * (ethertype, IP version/TTL, next-protocol, UDP destination port, flags).
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf, remembered so later items can patch earlier
	 * headers (e.g. set the ethertype once the L3 type is known). */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* NOTE(review): assumes items->spec is non-NULL here —
		 * presumably guaranteed by prior validation; confirm. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Default the outer ethertype to 802.1Q. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch the innermost L2 ethertype to IPv4. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch the innermost L2 ethertype to IPv6. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Default the IP next-protocol to UDP. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* Default the UDP destination port for VXLAN. */
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE cannot default its next protocol: reject. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			/* GRE cannot default its next protocol: reject. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Default the IP next-protocol to GRE. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
1364
1365 /**
1366  * Convert L2 encap action to DV specification.
1367  *
1368  * @param[in] dev
1369  *   Pointer to rte_eth_dev structure.
1370  * @param[in] action
1371  *   Pointer to action structure.
1372  * @param[in, out] dev_flow
1373  *   Pointer to the mlx5_flow.
1374  * @param[in] transfer
1375  *   Mark if the flow is E-Switch flow.
1376  * @param[out] error
1377  *   Pointer to the error structure.
1378  *
1379  * @return
1380  *   0 on success, a negative errno value otherwise and rte_errno is set.
1381  */
1382 static int
1383 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1384                                const struct rte_flow_action *action,
1385                                struct mlx5_flow *dev_flow,
1386                                uint8_t transfer,
1387                                struct rte_flow_error *error)
1388 {
1389         const struct rte_flow_item *encap_data;
1390         const struct rte_flow_action_raw_encap *raw_encap_data;
1391         struct mlx5_flow_dv_encap_decap_resource res = {
1392                 .reformat_type =
1393                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1394                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1395                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1396         };
1397
1398         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1399                 raw_encap_data =
1400                         (const struct rte_flow_action_raw_encap *)action->conf;
1401                 res.size = raw_encap_data->size;
1402                 memcpy(res.buf, raw_encap_data->data, res.size);
1403         } else {
1404                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1405                         encap_data =
1406                                 ((const struct rte_flow_action_vxlan_encap *)
1407                                                 action->conf)->definition;
1408                 else
1409                         encap_data =
1410                                 ((const struct rte_flow_action_nvgre_encap *)
1411                                                 action->conf)->definition;
1412                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1413                                                &res.size, error))
1414                         return -rte_errno;
1415         }
1416         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1417                 return rte_flow_error_set(error, EINVAL,
1418                                           RTE_FLOW_ERROR_TYPE_ACTION,
1419                                           NULL, "can't create L2 encap action");
1420         return 0;
1421 }
1422
1423 /**
1424  * Convert L2 decap action to DV specification.
1425  *
1426  * @param[in] dev
1427  *   Pointer to rte_eth_dev structure.
1428  * @param[in, out] dev_flow
1429  *   Pointer to the mlx5_flow.
1430  * @param[in] transfer
1431  *   Mark if the flow is E-Switch flow.
1432  * @param[out] error
1433  *   Pointer to the error structure.
1434  *
1435  * @return
1436  *   0 on success, a negative errno value otherwise and rte_errno is set.
1437  */
1438 static int
1439 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1440                                struct mlx5_flow *dev_flow,
1441                                uint8_t transfer,
1442                                struct rte_flow_error *error)
1443 {
1444         struct mlx5_flow_dv_encap_decap_resource res = {
1445                 .size = 0,
1446                 .reformat_type =
1447                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1448                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1449                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1450         };
1451
1452         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1453                 return rte_flow_error_set(error, EINVAL,
1454                                           RTE_FLOW_ERROR_TYPE_ACTION,
1455                                           NULL, "can't create L2 decap action");
1456         return 0;
1457 }
1458
1459 /**
1460  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1461  *
1462  * @param[in] dev
1463  *   Pointer to rte_eth_dev structure.
1464  * @param[in] action
1465  *   Pointer to action structure.
1466  * @param[in, out] dev_flow
1467  *   Pointer to the mlx5_flow.
1468  * @param[in] attr
1469  *   Pointer to the flow attributes.
1470  * @param[out] error
1471  *   Pointer to the error structure.
1472  *
1473  * @return
1474  *   0 on success, a negative errno value otherwise and rte_errno is set.
1475  */
1476 static int
1477 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1478                                 const struct rte_flow_action *action,
1479                                 struct mlx5_flow *dev_flow,
1480                                 const struct rte_flow_attr *attr,
1481                                 struct rte_flow_error *error)
1482 {
1483         const struct rte_flow_action_raw_encap *encap_data;
1484         struct mlx5_flow_dv_encap_decap_resource res;
1485
1486         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1487         res.size = encap_data->size;
1488         memcpy(res.buf, encap_data->data, res.size);
1489         res.reformat_type = attr->egress ?
1490                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1491                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1492         if (attr->transfer)
1493                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1494         else
1495                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1496                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1497         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1498                 return rte_flow_error_set(error, EINVAL,
1499                                           RTE_FLOW_ERROR_TYPE_ACTION,
1500                                           NULL, "can't create encap action");
1501         return 0;
1502 }
1503
1504 /**
1505  * Validate the modify-header actions.
1506  *
1507  * @param[in] action_flags
1508  *   Holds the actions detected until now.
1509  * @param[in] action
1510  *   Pointer to the modify action.
1511  * @param[out] error
1512  *   Pointer to error structure.
1513  *
1514  * @return
1515  *   0 on success, a negative errno value otherwise and rte_errno is set.
1516  */
1517 static int
1518 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1519                                    const struct rte_flow_action *action,
1520                                    struct rte_flow_error *error)
1521 {
1522         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1523                 return rte_flow_error_set(error, EINVAL,
1524                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1525                                           NULL, "action configuration not set");
1526         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1527                 return rte_flow_error_set(error, EINVAL,
1528                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1529                                           "can't have encap action before"
1530                                           " modify action");
1531         return 0;
1532 }
1533
1534 /**
1535  * Validate the modify-header MAC address actions.
1536  *
1537  * @param[in] action_flags
1538  *   Holds the actions detected until now.
1539  * @param[in] action
1540  *   Pointer to the modify action.
1541  * @param[in] item_flags
1542  *   Holds the items detected.
1543  * @param[out] error
1544  *   Pointer to error structure.
1545  *
1546  * @return
1547  *   0 on success, a negative errno value otherwise and rte_errno is set.
1548  */
1549 static int
1550 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1551                                    const struct rte_flow_action *action,
1552                                    const uint64_t item_flags,
1553                                    struct rte_flow_error *error)
1554 {
1555         int ret = 0;
1556
1557         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1558         if (!ret) {
1559                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1560                         return rte_flow_error_set(error, EINVAL,
1561                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1562                                                   NULL,
1563                                                   "no L2 item in pattern");
1564         }
1565         return ret;
1566 }
1567
1568 /**
1569  * Validate the modify-header IPv4 address actions.
1570  *
1571  * @param[in] action_flags
1572  *   Holds the actions detected until now.
1573  * @param[in] action
1574  *   Pointer to the modify action.
1575  * @param[in] item_flags
1576  *   Holds the items detected.
1577  * @param[out] error
1578  *   Pointer to error structure.
1579  *
1580  * @return
1581  *   0 on success, a negative errno value otherwise and rte_errno is set.
1582  */
1583 static int
1584 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1585                                     const struct rte_flow_action *action,
1586                                     const uint64_t item_flags,
1587                                     struct rte_flow_error *error)
1588 {
1589         int ret = 0;
1590
1591         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1592         if (!ret) {
1593                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1594                         return rte_flow_error_set(error, EINVAL,
1595                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1596                                                   NULL,
1597                                                   "no ipv4 item in pattern");
1598         }
1599         return ret;
1600 }
1601
1602 /**
1603  * Validate the modify-header IPv6 address actions.
1604  *
1605  * @param[in] action_flags
1606  *   Holds the actions detected until now.
1607  * @param[in] action
1608  *   Pointer to the modify action.
1609  * @param[in] item_flags
1610  *   Holds the items detected.
1611  * @param[out] error
1612  *   Pointer to error structure.
1613  *
1614  * @return
1615  *   0 on success, a negative errno value otherwise and rte_errno is set.
1616  */
1617 static int
1618 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1619                                     const struct rte_flow_action *action,
1620                                     const uint64_t item_flags,
1621                                     struct rte_flow_error *error)
1622 {
1623         int ret = 0;
1624
1625         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1626         if (!ret) {
1627                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1628                         return rte_flow_error_set(error, EINVAL,
1629                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1630                                                   NULL,
1631                                                   "no ipv6 item in pattern");
1632         }
1633         return ret;
1634 }
1635
1636 /**
1637  * Validate the modify-header TP actions.
1638  *
1639  * @param[in] action_flags
1640  *   Holds the actions detected until now.
1641  * @param[in] action
1642  *   Pointer to the modify action.
1643  * @param[in] item_flags
1644  *   Holds the items detected.
1645  * @param[out] error
1646  *   Pointer to error structure.
1647  *
1648  * @return
1649  *   0 on success, a negative errno value otherwise and rte_errno is set.
1650  */
1651 static int
1652 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1653                                   const struct rte_flow_action *action,
1654                                   const uint64_t item_flags,
1655                                   struct rte_flow_error *error)
1656 {
1657         int ret = 0;
1658
1659         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1660         if (!ret) {
1661                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1662                         return rte_flow_error_set(error, EINVAL,
1663                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1664                                                   NULL, "no transport layer "
1665                                                   "in pattern");
1666         }
1667         return ret;
1668 }
1669
1670 /**
1671  * Validate the modify-header TTL actions.
1672  *
1673  * @param[in] action_flags
1674  *   Holds the actions detected until now.
1675  * @param[in] action
1676  *   Pointer to the modify action.
1677  * @param[in] item_flags
1678  *   Holds the items detected.
1679  * @param[out] error
1680  *   Pointer to error structure.
1681  *
1682  * @return
1683  *   0 on success, a negative errno value otherwise and rte_errno is set.
1684  */
1685 static int
1686 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1687                                    const struct rte_flow_action *action,
1688                                    const uint64_t item_flags,
1689                                    struct rte_flow_error *error)
1690 {
1691         int ret = 0;
1692
1693         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1694         if (!ret) {
1695                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1696                         return rte_flow_error_set(error, EINVAL,
1697                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1698                                                   NULL,
1699                                                   "no IP protocol in pattern");
1700         }
1701         return ret;
1702 }
1703
1704 /**
1705  * Validate jump action.
1706  *
1707  * @param[in] action
1708  *   Pointer to the modify action.
1709  * @param[in] group
1710  *   The group of the current flow.
1711  * @param[out] error
1712  *   Pointer to error structure.
1713  *
1714  * @return
1715  *   0 on success, a negative errno value otherwise and rte_errno is set.
1716  */
1717 static int
1718 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1719                              uint32_t group,
1720                              struct rte_flow_error *error)
1721 {
1722         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1723                 return rte_flow_error_set(error, EINVAL,
1724                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1725                                           NULL, "action configuration not set");
1726         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1727                 return rte_flow_error_set(error, EINVAL,
1728                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1729                                           "target group must be higher then"
1730                                           " the current flow group");
1731         return 0;
1732 }
1733
/**
1735  * Validate the port_id action.
1736  *
1737  * @param[in] dev
1738  *   Pointer to rte_eth_dev structure.
1739  * @param[in] action_flags
1740  *   Bit-fields that holds the actions detected until now.
1741  * @param[in] action
1742  *   Port_id RTE action structure.
1743  * @param[in] attr
1744  *   Attributes of flow that includes this action.
1745  * @param[out] error
1746  *   Pointer to error structure.
1747  *
1748  * @return
1749  *   0 on success, a negative errno value otherwise and rte_errno is set.
1750  */
1751 static int
1752 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1753                                 uint64_t action_flags,
1754                                 const struct rte_flow_action *action,
1755                                 const struct rte_flow_attr *attr,
1756                                 struct rte_flow_error *error)
1757 {
1758         const struct rte_flow_action_port_id *port_id;
1759         uint16_t port;
1760         uint16_t esw_domain_id;
1761         uint16_t act_port_domain_id;
1762         int ret;
1763
1764         if (!attr->transfer)
1765                 return rte_flow_error_set(error, ENOTSUP,
1766                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1767                                           NULL,
1768                                           "port id action is valid in transfer"
1769                                           " mode only");
1770         if (!action || !action->conf)
1771                 return rte_flow_error_set(error, ENOTSUP,
1772                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1773                                           NULL,
1774                                           "port id action parameters must be"
1775                                           " specified");
1776         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1777                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1778                 return rte_flow_error_set(error, EINVAL,
1779                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1780                                           "can have only one fate actions in"
1781                                           " a flow");
1782         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1783                                         &esw_domain_id, NULL);
1784         if (ret < 0)
1785                 return rte_flow_error_set(error, -ret,
1786                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1787                                           NULL,
1788                                           "failed to obtain E-Switch info");
1789         port_id = action->conf;
1790         port = port_id->original ? dev->data->port_id : port_id->id;
1791         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1792         if (ret)
1793                 return rte_flow_error_set
1794                                 (error, -ret,
1795                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1796                                  "failed to obtain E-Switch port id for port");
1797         if (act_port_domain_id != esw_domain_id)
1798                 return rte_flow_error_set
1799                                 (error, -ret,
1800                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1801                                  "port does not belong to"
1802                                  " E-Switch being configured");
1803         return 0;
1804 }
1805
1806 /**
1807  * Find existing modify-header resource or create and register a new one.
1808  *
 * @param[in, out] dev
1810  *   Pointer to rte_eth_dev structure.
1811  * @param[in, out] resource
1812  *   Pointer to modify-header resource.
 * @param[in, out] dev_flow
1814  *   Pointer to the dev_flow.
1815  * @param[out] error
1816  *   pointer to error structure.
1817  *
1818  * @return
1819  *   0 on success otherwise -errno and errno is set.
1820  */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_domain *ns;

	/* Select the DR domain matching the resource's flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	/* Flows in group 0 need the root-level flag. */
	resource->flags =
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		/*
		 * A cache hit requires the same table type, action count,
		 * flags and byte-identical modify-action entries.
		 */
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Share the cached resource with this flow. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, cache_resource->flags,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New resource starts with one reference held by this flow. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1887
1888 /**
1889  * Get or create a flow counter.
1890  *
1891  * @param[in] dev
1892  *   Pointer to the Ethernet device structure.
1893  * @param[in] shared
1894  *   Indicate if this counter is shared with other flows.
1895  * @param[in] id
1896  *   Counter identifier.
1897  *
1898  * @return
1899  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
1900  */
1901 static struct mlx5_flow_counter *
1902 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1903 {
1904         struct mlx5_priv *priv = dev->data->dev_private;
1905         struct mlx5_flow_counter *cnt = NULL;
1906         struct mlx5_devx_counter_set *dcs = NULL;
1907         int ret;
1908
1909         if (!priv->config.devx) {
1910                 ret = -ENOTSUP;
1911                 goto error_exit;
1912         }
1913         if (shared) {
1914                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1915                         if (cnt->shared && cnt->id == id) {
1916                                 cnt->ref_cnt++;
1917                                 return cnt;
1918                         }
1919                 }
1920         }
1921         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1922         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1923         if (!dcs || !cnt) {
1924                 ret = -ENOMEM;
1925                 goto error_exit;
1926         }
1927         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1928         if (ret)
1929                 goto error_exit;
1930         struct mlx5_flow_counter tmpl = {
1931                 .shared = shared,
1932                 .ref_cnt = 1,
1933                 .id = id,
1934                 .dcs = dcs,
1935         };
1936         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1937         if (!tmpl.action) {
1938                 ret = errno;
1939                 goto error_exit;
1940         }
1941         *cnt = tmpl;
1942         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1943         return cnt;
1944 error_exit:
1945         rte_free(cnt);
1946         rte_free(dcs);
1947         rte_errno = -ret;
1948         return NULL;
1949 }
1950
1951 /**
1952  * Release a flow counter.
1953  *
1954  * @param[in] counter
1955  *   Pointer to the counter handler.
1956  */
1957 static void
1958 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1959 {
1960         int ret;
1961
1962         if (!counter)
1963                 return;
1964         if (--counter->ref_cnt == 0) {
1965                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1966                 if (ret)
1967                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1968                 LIST_REMOVE(counter, next);
1969                 rte_free(counter->dcs);
1970                 rte_free(counter);
1971         }
1972 }
1973
1974 /**
1975  * Verify the @p attributes will be correctly understood by the NIC and store
1976  * them in the @p flow if everything is correct.
1977  *
1978  * @param[in] dev
1979  *   Pointer to dev struct.
1980  * @param[in] attributes
1981  *   Pointer to flow attributes
1982  * @param[out] error
1983  *   Pointer to error structure.
1984  *
1985  * @return
1986  *   0 on success, a negative errno value otherwise and rte_errno is set.
1987  */
1988 static int
1989 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1990                             const struct rte_flow_attr *attributes,
1991                             struct rte_flow_error *error)
1992 {
1993         struct mlx5_priv *priv = dev->data->dev_private;
1994         uint32_t priority_max = priv->config.flow_prio - 1;
1995
1996 #ifndef HAVE_MLX5DV_DR
1997         if (attributes->group)
1998                 return rte_flow_error_set(error, ENOTSUP,
1999                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2000                                           NULL,
2001                                           "groups is not supported");
2002 #endif
2003         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2004             attributes->priority >= priority_max)
2005                 return rte_flow_error_set(error, ENOTSUP,
2006                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2007                                           NULL,
2008                                           "priority out of range");
2009         if (attributes->transfer) {
2010                 if (!priv->config.dv_esw_en)
2011                         return rte_flow_error_set
2012                                 (error, ENOTSUP,
2013                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2014                                  "E-Switch dr is not supported");
2015                 if (!(priv->representor || priv->master))
2016                         return rte_flow_error_set
2017                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2018                                  NULL, "E-Switch configurationd can only be"
2019                                  " done by a master or a representor device");
2020                 if (attributes->egress)
2021                         return rte_flow_error_set
2022                                 (error, ENOTSUP,
2023                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2024                                  "egress is not supported");
2025                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2026                         return rte_flow_error_set
2027                                 (error, EINVAL,
2028                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2029                                  NULL, "group must be smaller than "
2030                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2031         }
2032         if (!(attributes->egress ^ attributes->ingress))
2033                 return rte_flow_error_set(error, ENOTSUP,
2034                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2035                                           "must specify exactly one of "
2036                                           "ingress or egress");
2037         return 0;
2038 }
2039
2040 /**
2041  * Internal validation function. For validating both actions and items.
2042  *
2043  * @param[in] dev
2044  *   Pointer to the rte_eth_dev structure.
2045  * @param[in] attr
2046  *   Pointer to the flow attributes.
2047  * @param[in] items
2048  *   Pointer to the list of items.
2049  * @param[in] actions
2050  *   Pointer to the list of actions.
2051  * @param[out] error
2052  *   Pointer to the error structure.
2053  *
2054  * @return
2055  *   0 on success, a negative errno value otherwise and rte_errno is set.
2056  */
2057 static int
2058 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2059                  const struct rte_flow_item items[],
2060                  const struct rte_flow_action actions[],
2061                  struct rte_flow_error *error)
2062 {
2063         int ret;
2064         uint64_t action_flags = 0;
2065         uint64_t item_flags = 0;
2066         uint64_t last_item = 0;
2067         uint8_t next_protocol = 0xff;
2068         int actions_n = 0;
2069
2070         if (items == NULL)
2071                 return -1;
2072         ret = flow_dv_validate_attributes(dev, attr, error);
2073         if (ret < 0)
2074                 return ret;
2075         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2076                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2077                 switch (items->type) {
2078                 case RTE_FLOW_ITEM_TYPE_VOID:
2079                         break;
2080                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2081                         ret = flow_dv_validate_item_port_id
2082                                         (dev, items, attr, item_flags, error);
2083                         if (ret < 0)
2084                                 return ret;
2085                         last_item |= MLX5_FLOW_ITEM_PORT_ID;
2086                         break;
2087                 case RTE_FLOW_ITEM_TYPE_ETH:
2088                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2089                                                           error);
2090                         if (ret < 0)
2091                                 return ret;
2092                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2093                                              MLX5_FLOW_LAYER_OUTER_L2;
2094                         break;
2095                 case RTE_FLOW_ITEM_TYPE_VLAN:
2096                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2097                                                            error);
2098                         if (ret < 0)
2099                                 return ret;
2100                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2101                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2102                         break;
2103                 case RTE_FLOW_ITEM_TYPE_IPV4:
2104                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2105                                                            NULL, error);
2106                         if (ret < 0)
2107                                 return ret;
2108                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2109                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2110                         if (items->mask != NULL &&
2111                             ((const struct rte_flow_item_ipv4 *)
2112                              items->mask)->hdr.next_proto_id) {
2113                                 next_protocol =
2114                                         ((const struct rte_flow_item_ipv4 *)
2115                                          (items->spec))->hdr.next_proto_id;
2116                                 next_protocol &=
2117                                         ((const struct rte_flow_item_ipv4 *)
2118                                          (items->mask))->hdr.next_proto_id;
2119                         } else {
2120                                 /* Reset for inner layer. */
2121                                 next_protocol = 0xff;
2122                         }
2123                         break;
2124                 case RTE_FLOW_ITEM_TYPE_IPV6:
2125                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2126                                                            NULL, error);
2127                         if (ret < 0)
2128                                 return ret;
2129                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2130                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2131                         if (items->mask != NULL &&
2132                             ((const struct rte_flow_item_ipv6 *)
2133                              items->mask)->hdr.proto) {
2134                                 next_protocol =
2135                                         ((const struct rte_flow_item_ipv6 *)
2136                                          items->spec)->hdr.proto;
2137                                 next_protocol &=
2138                                         ((const struct rte_flow_item_ipv6 *)
2139                                          items->mask)->hdr.proto;
2140                         } else {
2141                                 /* Reset for inner layer. */
2142                                 next_protocol = 0xff;
2143                         }
2144                         break;
2145                 case RTE_FLOW_ITEM_TYPE_TCP:
2146                         ret = mlx5_flow_validate_item_tcp
2147                                                 (items, item_flags,
2148                                                  next_protocol,
2149                                                  &rte_flow_item_tcp_mask,
2150                                                  error);
2151                         if (ret < 0)
2152                                 return ret;
2153                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2154                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2155                         break;
2156                 case RTE_FLOW_ITEM_TYPE_UDP:
2157                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2158                                                           next_protocol,
2159                                                           error);
2160                         if (ret < 0)
2161                                 return ret;
2162                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2163                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2164                         break;
2165                 case RTE_FLOW_ITEM_TYPE_GRE:
2166                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2167                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2168                                                           next_protocol, error);
2169                         if (ret < 0)
2170                                 return ret;
2171                         last_item = MLX5_FLOW_LAYER_GRE;
2172                         break;
2173                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2174                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2175                                                             error);
2176                         if (ret < 0)
2177                                 return ret;
2178                         last_item = MLX5_FLOW_LAYER_VXLAN;
2179                         break;
2180                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2181                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2182                                                                 item_flags, dev,
2183                                                                 error);
2184                         if (ret < 0)
2185                                 return ret;
2186                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2187                         break;
2188                 case RTE_FLOW_ITEM_TYPE_MPLS:
2189                         ret = mlx5_flow_validate_item_mpls(dev, items,
2190                                                            item_flags,
2191                                                            last_item, error);
2192                         if (ret < 0)
2193                                 return ret;
2194                         last_item = MLX5_FLOW_LAYER_MPLS;
2195                         break;
2196                 case RTE_FLOW_ITEM_TYPE_META:
2197                         ret = flow_dv_validate_item_meta(dev, items, attr,
2198                                                          error);
2199                         if (ret < 0)
2200                                 return ret;
2201                         last_item = MLX5_FLOW_ITEM_METADATA;
2202                         break;
2203                 default:
2204                         return rte_flow_error_set(error, ENOTSUP,
2205                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2206                                                   NULL, "item not supported");
2207                 }
2208                 item_flags |= last_item;
2209         }
2210         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2211                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2212                         return rte_flow_error_set(error, ENOTSUP,
2213                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2214                                                   actions, "too many actions");
2215                 switch (actions->type) {
2216                 case RTE_FLOW_ACTION_TYPE_VOID:
2217                         break;
2218                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2219                         ret = flow_dv_validate_action_port_id(dev,
2220                                                               action_flags,
2221                                                               actions,
2222                                                               attr,
2223                                                               error);
2224                         if (ret)
2225                                 return ret;
2226                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2227                         ++actions_n;
2228                         break;
2229                 case RTE_FLOW_ACTION_TYPE_FLAG:
2230                         ret = mlx5_flow_validate_action_flag(action_flags,
2231                                                              attr, error);
2232                         if (ret < 0)
2233                                 return ret;
2234                         action_flags |= MLX5_FLOW_ACTION_FLAG;
2235                         ++actions_n;
2236                         break;
2237                 case RTE_FLOW_ACTION_TYPE_MARK:
2238                         ret = mlx5_flow_validate_action_mark(actions,
2239                                                              action_flags,
2240                                                              attr, error);
2241                         if (ret < 0)
2242                                 return ret;
2243                         action_flags |= MLX5_FLOW_ACTION_MARK;
2244                         ++actions_n;
2245                         break;
2246                 case RTE_FLOW_ACTION_TYPE_DROP:
2247                         ret = mlx5_flow_validate_action_drop(action_flags,
2248                                                              attr, error);
2249                         if (ret < 0)
2250                                 return ret;
2251                         action_flags |= MLX5_FLOW_ACTION_DROP;
2252                         ++actions_n;
2253                         break;
2254                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2255                         ret = mlx5_flow_validate_action_queue(actions,
2256                                                               action_flags, dev,
2257                                                               attr, error);
2258                         if (ret < 0)
2259                                 return ret;
2260                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
2261                         ++actions_n;
2262                         break;
2263                 case RTE_FLOW_ACTION_TYPE_RSS:
2264                         ret = mlx5_flow_validate_action_rss(actions,
2265                                                             action_flags, dev,
2266                                                             attr, item_flags,
2267                                                             error);
2268                         if (ret < 0)
2269                                 return ret;
2270                         action_flags |= MLX5_FLOW_ACTION_RSS;
2271                         ++actions_n;
2272                         break;
2273                 case RTE_FLOW_ACTION_TYPE_COUNT:
2274                         ret = flow_dv_validate_action_count(dev, error);
2275                         if (ret < 0)
2276                                 return ret;
2277                         action_flags |= MLX5_FLOW_ACTION_COUNT;
2278                         ++actions_n;
2279                         break;
2280                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2281                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2282                         ret = flow_dv_validate_action_l2_encap(action_flags,
2283                                                                actions, attr,
2284                                                                error);
2285                         if (ret < 0)
2286                                 return ret;
2287                         action_flags |= actions->type ==
2288                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2289                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2290                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2291                         ++actions_n;
2292                         break;
2293                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2294                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2295                         ret = flow_dv_validate_action_l2_decap(action_flags,
2296                                                                attr, error);
2297                         if (ret < 0)
2298                                 return ret;
2299                         action_flags |= actions->type ==
2300                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2301                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2302                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2303                         ++actions_n;
2304                         break;
2305                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2306                         ret = flow_dv_validate_action_raw_encap(action_flags,
2307                                                                 actions, attr,
2308                                                                 error);
2309                         if (ret < 0)
2310                                 return ret;
2311                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2312                         ++actions_n;
2313                         break;
2314                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2315                         ret = flow_dv_validate_action_raw_decap(action_flags,
2316                                                                 actions, attr,
2317                                                                 error);
2318                         if (ret < 0)
2319                                 return ret;
2320                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2321                         ++actions_n;
2322                         break;
2323                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2324                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2325                         ret = flow_dv_validate_action_modify_mac(action_flags,
2326                                                                  actions,
2327                                                                  item_flags,
2328                                                                  error);
2329                         if (ret < 0)
2330                                 return ret;
2331                         /* Count all modify-header actions as one action. */
2332                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2333                                 ++actions_n;
2334                         action_flags |= actions->type ==
2335                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2336                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2337                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2338                         break;
2339
2340                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2341                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2342                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2343                                                                   actions,
2344                                                                   item_flags,
2345                                                                   error);
2346                         if (ret < 0)
2347                                 return ret;
2348                         /* Count all modify-header actions as one action. */
2349                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2350                                 ++actions_n;
2351                         action_flags |= actions->type ==
2352                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2353                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2354                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2355                         break;
2356                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2357                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2358                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2359                                                                   actions,
2360                                                                   item_flags,
2361                                                                   error);
2362                         if (ret < 0)
2363                                 return ret;
2364                         /* Count all modify-header actions as one action. */
2365                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2366                                 ++actions_n;
2367                         action_flags |= actions->type ==
2368                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2369                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2370                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2371                         break;
2372                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2373                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2374                         ret = flow_dv_validate_action_modify_tp(action_flags,
2375                                                                 actions,
2376                                                                 item_flags,
2377                                                                 error);
2378                         if (ret < 0)
2379                                 return ret;
2380                         /* Count all modify-header actions as one action. */
2381                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2382                                 ++actions_n;
2383                         action_flags |= actions->type ==
2384                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2385                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2386                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2387                         break;
2388                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2389                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2390                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2391                                                                  actions,
2392                                                                  item_flags,
2393                                                                  error);
2394                         if (ret < 0)
2395                                 return ret;
2396                         /* Count all modify-header actions as one action. */
2397                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2398                                 ++actions_n;
2399                         action_flags |= actions->type ==
2400                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2401                                                 MLX5_FLOW_ACTION_SET_TTL :
2402                                                 MLX5_FLOW_ACTION_DEC_TTL;
2403                         break;
2404                 case RTE_FLOW_ACTION_TYPE_JUMP:
2405                         ret = flow_dv_validate_action_jump(actions,
2406                                                            attr->group, error);
2407                         if (ret)
2408                                 return ret;
2409                         ++actions_n;
2410                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2411                         break;
2412                 default:
2413                         return rte_flow_error_set(error, ENOTSUP,
2414                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2415                                                   actions,
2416                                                   "action not supported");
2417                 }
2418         }
2419         /* Eswitch has few restrictions on using items and actions */
2420         if (attr->transfer) {
2421                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2422                         return rte_flow_error_set(error, ENOTSUP,
2423                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2424                                                   NULL,
2425                                                   "unsupported action FLAG");
2426                 if (action_flags & MLX5_FLOW_ACTION_MARK)
2427                         return rte_flow_error_set(error, ENOTSUP,
2428                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2429                                                   NULL,
2430                                                   "unsupported action MARK");
2431                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2432                         return rte_flow_error_set(error, ENOTSUP,
2433                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2434                                                   NULL,
2435                                                   "unsupported action QUEUE");
2436                 if (action_flags & MLX5_FLOW_ACTION_RSS)
2437                         return rte_flow_error_set(error, ENOTSUP,
2438                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2439                                                   NULL,
2440                                                   "unsupported action RSS");
2441                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2442                         return rte_flow_error_set(error, EINVAL,
2443                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2444                                                   actions,
2445                                                   "no fate action is found");
2446         } else {
2447                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2448                         return rte_flow_error_set(error, EINVAL,
2449                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2450                                                   actions,
2451                                                   "no fate action is found");
2452         }
2453         return 0;
2454 }
2455
2456 /**
2457  * Internal preparation function. Allocates the DV flow size,
2458  * this size is constant.
2459  *
2460  * @param[in] attr
2461  *   Pointer to the flow attributes.
2462  * @param[in] items
2463  *   Pointer to the list of items.
2464  * @param[in] actions
2465  *   Pointer to the list of actions.
2466  * @param[out] error
2467  *   Pointer to the error structure.
2468  *
2469  * @return
2470  *   Pointer to mlx5_flow object on success,
2471  *   otherwise NULL and rte_errno is set.
2472  */
2473 static struct mlx5_flow *
2474 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2475                 const struct rte_flow_item items[] __rte_unused,
2476                 const struct rte_flow_action actions[] __rte_unused,
2477                 struct rte_flow_error *error)
2478 {
2479         uint32_t size = sizeof(struct mlx5_flow);
2480         struct mlx5_flow *flow;
2481
2482         flow = rte_calloc(__func__, 1, size, 0);
2483         if (!flow) {
2484                 rte_flow_error_set(error, ENOMEM,
2485                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2486                                    "not enough memory to create flow");
2487                 return NULL;
2488         }
2489         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2490         return flow;
2491 }
2492
2493 #ifndef NDEBUG
2494 /**
2495  * Sanity check for match mask and value. Similar to check_valid_spec() in
2496  * kernel driver. If unmasked bit is present in value, it returns failure.
2497  *
2498  * @param match_mask
2499  *   pointer to match mask buffer.
2500  * @param match_value
2501  *   pointer to match value buffer.
2502  *
2503  * @return
2504  *   0 if valid, -EINVAL otherwise.
2505  */
2506 static int
2507 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2508 {
2509         uint8_t *m = match_mask;
2510         uint8_t *v = match_value;
2511         unsigned int i;
2512
2513         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2514                 if (v[i] & ~m[i]) {
2515                         DRV_LOG(ERR,
2516                                 "match_value differs from match_criteria"
2517                                 " %p[%u] != %p[%u]",
2518                                 match_value, i, match_mask, i);
2519                         return -EINVAL;
2520                 }
2521         }
2522         return 0;
2523 }
2524 #endif
2525
2526 /**
2527  * Add Ethernet item to matcher and to the value.
2528  *
2529  * @param[in, out] matcher
2530  *   Flow matcher.
2531  * @param[in, out] key
2532  *   Flow matcher value.
2533  * @param[in] item
2534  *   Flow pattern to translate.
2535  * @param[in] inner
2536  *   Item is inner pattern.
2537  */
2538 static void
2539 flow_dv_translate_item_eth(void *matcher, void *key,
2540                            const struct rte_flow_item *item, int inner)
2541 {
2542         const struct rte_flow_item_eth *eth_m = item->mask;
2543         const struct rte_flow_item_eth *eth_v = item->spec;
2544         const struct rte_flow_item_eth nic_mask = {
2545                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2546                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2547                 .type = RTE_BE16(0xffff),
2548         };
2549         void *headers_m;
2550         void *headers_v;
2551         char *l24_v;
2552         unsigned int i;
2553
2554         if (!eth_v)
2555                 return;
2556         if (!eth_m)
2557                 eth_m = &nic_mask;
2558         if (inner) {
2559                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2560                                          inner_headers);
2561                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2562         } else {
2563                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2564                                          outer_headers);
2565                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2566         }
2567         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2568                &eth_m->dst, sizeof(eth_m->dst));
2569         /* The value must be in the range of the mask. */
2570         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2571         for (i = 0; i < sizeof(eth_m->dst); ++i)
2572                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2573         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2574                &eth_m->src, sizeof(eth_m->src));
2575         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2576         /* The value must be in the range of the mask. */
2577         for (i = 0; i < sizeof(eth_m->dst); ++i)
2578                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2579         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2580                  rte_be_to_cpu_16(eth_m->type));
2581         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2582         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2583 }
2584
2585 /**
2586  * Add VLAN item to matcher and to the value.
2587  *
2588  * @param[in, out] matcher
2589  *   Flow matcher.
2590  * @param[in, out] key
2591  *   Flow matcher value.
2592  * @param[in] item
2593  *   Flow pattern to translate.
2594  * @param[in] inner
2595  *   Item is inner pattern.
2596  */
2597 static void
2598 flow_dv_translate_item_vlan(void *matcher, void *key,
2599                             const struct rte_flow_item *item,
2600                             int inner)
2601 {
2602         const struct rte_flow_item_vlan *vlan_m = item->mask;
2603         const struct rte_flow_item_vlan *vlan_v = item->spec;
2604         const struct rte_flow_item_vlan nic_mask = {
2605                 .tci = RTE_BE16(0x0fff),
2606                 .inner_type = RTE_BE16(0xffff),
2607         };
2608         void *headers_m;
2609         void *headers_v;
2610         uint16_t tci_m;
2611         uint16_t tci_v;
2612
2613         if (!vlan_v)
2614                 return;
2615         if (!vlan_m)
2616                 vlan_m = &nic_mask;
2617         if (inner) {
2618                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2619                                          inner_headers);
2620                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2621         } else {
2622                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2623                                          outer_headers);
2624                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2625         }
2626         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2627         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2628         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2629         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2630         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2631         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2632         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2633         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2634         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2635         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2636 }
2637
2638 /**
2639  * Add IPV4 item to matcher and to the value.
2640  *
2641  * @param[in, out] matcher
2642  *   Flow matcher.
2643  * @param[in, out] key
2644  *   Flow matcher value.
2645  * @param[in] item
2646  *   Flow pattern to translate.
2647  * @param[in] inner
2648  *   Item is inner pattern.
2649  * @param[in] group
2650  *   The group to insert the rule.
2651  */
2652 static void
2653 flow_dv_translate_item_ipv4(void *matcher, void *key,
2654                             const struct rte_flow_item *item,
2655                             int inner, uint32_t group)
2656 {
2657         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2658         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2659         const struct rte_flow_item_ipv4 nic_mask = {
2660                 .hdr = {
2661                         .src_addr = RTE_BE32(0xffffffff),
2662                         .dst_addr = RTE_BE32(0xffffffff),
2663                         .type_of_service = 0xff,
2664                         .next_proto_id = 0xff,
2665                 },
2666         };
2667         void *headers_m;
2668         void *headers_v;
2669         char *l24_m;
2670         char *l24_v;
2671         uint8_t tos;
2672
2673         if (inner) {
2674                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2675                                          inner_headers);
2676                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2677         } else {
2678                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2679                                          outer_headers);
2680                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2681         }
2682         if (group == 0)
2683                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2684         else
2685                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2686         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2687         if (!ipv4_v)
2688                 return;
2689         if (!ipv4_m)
2690                 ipv4_m = &nic_mask;
2691         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2692                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2693         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2694                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2695         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2696         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2697         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2698                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2699         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2700                           src_ipv4_src_ipv6.ipv4_layout.ipv4);
2701         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2702         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2703         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2704         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2705                  ipv4_m->hdr.type_of_service);
2706         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2707         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2708                  ipv4_m->hdr.type_of_service >> 2);
2709         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2710         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2711                  ipv4_m->hdr.next_proto_id);
2712         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2713                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2714 }
2715
2716 /**
2717  * Add IPV6 item to matcher and to the value.
2718  *
2719  * @param[in, out] matcher
2720  *   Flow matcher.
2721  * @param[in, out] key
2722  *   Flow matcher value.
2723  * @param[in] item
2724  *   Flow pattern to translate.
2725  * @param[in] inner
2726  *   Item is inner pattern.
2727  * @param[in] group
2728  *   The group to insert the rule.
2729  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Widest mask supported by the NIC: every IPv6 header bit. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	/* Select the inner or outer header section of the matcher layout. */
	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Pin IP version to 6. Group 0 masks the full ip_version nibble
	 * (0xf) while other groups mask only the 0x6 bits.
	 * NOTE(review): presumably a root-table matching restriction --
	 * confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* No spec: match any IPv6 packet, nothing more to program. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	/* Destination address: mask copied as-is, value = spec AND mask. */
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	/* Source address: same copy-mask / AND-value scheme. */
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* ECN sits at vtc_flow bits 20-21, DSCP at bits 22-27. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
2819
2820 /**
2821  * Add TCP item to matcher and to the value.
2822  *
2823  * @param[in, out] matcher
2824  *   Flow matcher.
2825  * @param[in, out] key
2826  *   Flow matcher value.
2827  * @param[in] item
2828  *   Flow pattern to translate.
2829  * @param[in] inner
2830  *   Item is inner pattern.
2831  */
2832 static void
2833 flow_dv_translate_item_tcp(void *matcher, void *key,
2834                            const struct rte_flow_item *item,
2835                            int inner)
2836 {
2837         const struct rte_flow_item_tcp *tcp_m = item->mask;
2838         const struct rte_flow_item_tcp *tcp_v = item->spec;
2839         void *headers_m;
2840         void *headers_v;
2841
2842         if (inner) {
2843                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2844                                          inner_headers);
2845                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2846         } else {
2847                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2848                                          outer_headers);
2849                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2850         }
2851         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2852         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2853         if (!tcp_v)
2854                 return;
2855         if (!tcp_m)
2856                 tcp_m = &rte_flow_item_tcp_mask;
2857         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2858                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2859         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2860                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2861         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2862                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2863         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2864                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2865 }
2866
2867 /**
2868  * Add UDP item to matcher and to the value.
2869  *
2870  * @param[in, out] matcher
2871  *   Flow matcher.
2872  * @param[in, out] key
2873  *   Flow matcher value.
2874  * @param[in] item
2875  *   Flow pattern to translate.
2876  * @param[in] inner
2877  *   Item is inner pattern.
2878  */
2879 static void
2880 flow_dv_translate_item_udp(void *matcher, void *key,
2881                            const struct rte_flow_item *item,
2882                            int inner)
2883 {
2884         const struct rte_flow_item_udp *udp_m = item->mask;
2885         const struct rte_flow_item_udp *udp_v = item->spec;
2886         void *headers_m;
2887         void *headers_v;
2888
2889         if (inner) {
2890                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2891                                          inner_headers);
2892                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2893         } else {
2894                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2895                                          outer_headers);
2896                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2897         }
2898         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2899         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2900         if (!udp_v)
2901                 return;
2902         if (!udp_m)
2903                 udp_m = &rte_flow_item_udp_mask;
2904         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2905                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2906         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2907                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2908         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2909                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2910         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2911                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2912 }
2913
2914 /**
2915  * Add GRE item to matcher and to the value.
2916  *
2917  * @param[in, out] matcher
2918  *   Flow matcher.
2919  * @param[in, out] key
2920  *   Flow matcher value.
2921  * @param[in] item
2922  *   Flow pattern to translate.
2923  * @param[in] inner
2924  *   Item is inner pattern.
2925  */
2926 static void
2927 flow_dv_translate_item_gre(void *matcher, void *key,
2928                            const struct rte_flow_item *item,
2929                            int inner)
2930 {
2931         const struct rte_flow_item_gre *gre_m = item->mask;
2932         const struct rte_flow_item_gre *gre_v = item->spec;
2933         void *headers_m;
2934         void *headers_v;
2935         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2936         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2937
2938         if (inner) {
2939                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2940                                          inner_headers);
2941                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2942         } else {
2943                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2944                                          outer_headers);
2945                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2946         }
2947         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2948         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2949         if (!gre_v)
2950                 return;
2951         if (!gre_m)
2952                 gre_m = &rte_flow_item_gre_mask;
2953         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2954                  rte_be_to_cpu_16(gre_m->protocol));
2955         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2956                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2957 }
2958
2959 /**
2960  * Add NVGRE item to matcher and to the value.
2961  *
2962  * @param[in, out] matcher
2963  *   Flow matcher.
2964  * @param[in, out] key
2965  *   Flow matcher value.
2966  * @param[in] item
2967  *   Flow pattern to translate.
2968  * @param[in] inner
2969  *   Item is inner pattern.
2970  */
2971 static void
2972 flow_dv_translate_item_nvgre(void *matcher, void *key,
2973                              const struct rte_flow_item *item,
2974                              int inner)
2975 {
2976         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2977         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2978         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2979         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2980         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2981         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2982         char *gre_key_m;
2983         char *gre_key_v;
2984         int size;
2985         int i;
2986
2987         flow_dv_translate_item_gre(matcher, key, item, inner);
2988         if (!nvgre_v)
2989                 return;
2990         if (!nvgre_m)
2991                 nvgre_m = &rte_flow_item_nvgre_mask;
2992         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2993         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2994         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2995         memcpy(gre_key_m, tni_flow_id_m, size);
2996         for (i = 0; i < size; ++i)
2997                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2998 }
2999
3000 /**
3001  * Add VXLAN item to matcher and to the value.
3002  *
3003  * @param[in, out] matcher
3004  *   Flow matcher.
3005  * @param[in, out] key
3006  *   Flow matcher value.
3007  * @param[in] item
3008  *   Flow pattern to translate.
3009  * @param[in] inner
3010  *   Item is inner pattern.
3011  */
3012 static void
3013 flow_dv_translate_item_vxlan(void *matcher, void *key,
3014                              const struct rte_flow_item *item,
3015                              int inner)
3016 {
3017         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3018         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3019         void *headers_m;
3020         void *headers_v;
3021         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3022         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3023         char *vni_m;
3024         char *vni_v;
3025         uint16_t dport;
3026         int size;
3027         int i;
3028
3029         if (inner) {
3030                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3031                                          inner_headers);
3032                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3033         } else {
3034                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3035                                          outer_headers);
3036                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3037         }
3038         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3039                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3040         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3041                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3042                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3043         }
3044         if (!vxlan_v)
3045                 return;
3046         if (!vxlan_m)
3047                 vxlan_m = &rte_flow_item_vxlan_mask;
3048         size = sizeof(vxlan_m->vni);
3049         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3050         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3051         memcpy(vni_m, vxlan_m->vni, size);
3052         for (i = 0; i < size; ++i)
3053                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3054 }
3055
3056 /**
3057  * Add MPLS item to matcher and to the value.
3058  *
3059  * @param[in, out] matcher
3060  *   Flow matcher.
3061  * @param[in, out] key
3062  *   Flow matcher value.
3063  * @param[in] item
3064  *   Flow pattern to translate.
3065  * @param[in] prev_layer
3066  *   The protocol layer indicated in previous item.
3067  * @param[in] inner
3068  *   Item is inner pattern.
3069  */
static void
flow_dv_translate_item_mpls(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    uint64_t prev_layer,
			    int inner)
{
	const uint32_t *in_mpls_m = item->mask;
	const uint32_t *in_mpls_v = item->spec;
	uint32_t *out_mpls_m = 0;
	uint32_t *out_mpls_v = 0;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
				     misc_parameters_2);
	void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
	void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
	void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);

	/*
	 * First, pin the encapsulating protocol according to the layer
	 * that preceded MPLS in the pattern: MPLS-over-UDP uses the
	 * well-known UDP port, MPLS-over-GRE the MPLS ethertype, and
	 * anything else matches on IP protocol number.
	 */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
			 MLX5_UDP_PORT_MPLS);
		break;
	case MLX5_FLOW_LAYER_GRE:
		MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
		MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
			 RTE_ETHER_TYPE_MPLS);
		break;
	default:
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
			 IPPROTO_MPLS);
		break;
	}
	/* No spec: only the encapsulation match above is programmed. */
	if (!in_mpls_v)
		return;
	if (!in_mpls_m)
		in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
	/*
	 * Second, pick the misc2 field that holds the first MPLS label
	 * for this encapsulation.
	 */
	switch (prev_layer) {
	case MLX5_FLOW_LAYER_OUTER_L4_UDP:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_udp);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_udp);
		break;
	case MLX5_FLOW_LAYER_GRE:
		out_mpls_m =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
						 outer_first_mpls_over_gre);
		out_mpls_v =
			(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
						 outer_first_mpls_over_gre);
		break;
	default:
		/* Inner MPLS not over GRE is not supported. */
		if (!inner) {
			out_mpls_m =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_m,
							 outer_first_mpls);
			out_mpls_v =
				(uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
							 misc2_v,
							 outer_first_mpls);
		}
		break;
	}
	/* Label word: mask as-is, value = spec AND mask (both NULL when
	 * the combination above is unsupported, in which case the label
	 * is silently not matched).
	 */
	if (out_mpls_m && out_mpls_v) {
		*out_mpls_m = *in_mpls_m;
		*out_mpls_v = *in_mpls_v & *in_mpls_m;
	}
}
3145
3146 /**
3147  * Add META item to matcher
3148  *
3149  * @param[in, out] matcher
3150  *   Flow matcher.
3151  * @param[in, out] key
3152  *   Flow matcher value.
3153  * @param[in] item
3154  *   Flow pattern to translate.
 */
3158 static void
3159 flow_dv_translate_item_meta(void *matcher, void *key,
3160                             const struct rte_flow_item *item)
3161 {
3162         const struct rte_flow_item_meta *meta_m;
3163         const struct rte_flow_item_meta *meta_v;
3164         void *misc2_m =
3165                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3166         void *misc2_v =
3167                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3168
3169         meta_m = (const void *)item->mask;
3170         if (!meta_m)
3171                 meta_m = &rte_flow_item_meta_mask;
3172         meta_v = (const void *)item->spec;
3173         if (meta_v) {
3174                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3175                          rte_be_to_cpu_32(meta_m->data));
3176                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3177                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
3178         }
3179 }
3180
3181 /**
3182  * Add source vport match to the specified matcher.
3183  *
3184  * @param[in, out] matcher
3185  *   Flow matcher.
3186  * @param[in, out] key
3187  *   Flow matcher value.
3188  * @param[in] port
3189  *   Source vport value to match
3190  * @param[in] mask
3191  *   Mask
3192  */
3193 static void
3194 flow_dv_translate_item_source_vport(void *matcher, void *key,
3195                                     int16_t port, uint16_t mask)
3196 {
3197         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3198         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3199
3200         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3201         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3202 }
3203
3204 /**
 * Translate port-id item to eswitch match on port-id.
 *
 * @param[in] dev
 *   The device to configure through.
3209  * @param[in, out] matcher
3210  *   Flow matcher.
3211  * @param[in, out] key
3212  *   Flow matcher value.
3213  * @param[in] item
3214  *   Flow pattern to translate.
3215  *
3216  * @return
3217  *   0 on success, a negative errno value otherwise.
3218  */
3219 static int
3220 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3221                                void *key, const struct rte_flow_item *item)
3222 {
3223         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3224         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3225         uint16_t mask, val, id;
3226         int ret;
3227
3228         mask = pid_m ? pid_m->id : 0xffff;
3229         id = pid_v ? pid_v->id : dev->data->port_id;
3230         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3231         if (ret)
3232                 return ret;
3233         flow_dv_translate_item_source_vport(matcher, key, val, mask);
3234         return 0;
3235 }
3236
/* All-zero reference block used to detect unused matcher sections. */
static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

/* Evaluates to non-zero when the given header section of
 * @match_criteria is entirely zero (i.e. not used by the matcher).
 */
#define HEADER_IS_ZERO(match_criteria, headers)                              \
	!(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
		 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3242
3243 /**
3244  * Calculate flow matcher enable bitmap.
3245  *
3246  * @param match_criteria
3247  *   Pointer to flow matcher criteria.
3248  *
3249  * @return
3250  *   Bitmap of enabled fields.
3251  */
3252 static uint8_t
3253 flow_dv_matcher_enable(uint32_t *match_criteria)
3254 {
3255         uint8_t match_criteria_enable;
3256
3257         match_criteria_enable =
3258                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3259                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3260         match_criteria_enable |=
3261                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3262                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3263         match_criteria_enable |=
3264                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3265                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3266         match_criteria_enable |=
3267                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3268                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3269 #ifdef HAVE_MLX5DV_DR
3270         match_criteria_enable |=
3271                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3272                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3273 #endif
3274         return match_criteria_enable;
3275 }
3276
3277
3278 /**
3279  * Get a flow table.
3280  *
3281  * @param dev[in, out]
3282  *   Pointer to rte_eth_dev structure.
3283  * @param[in] table_id
3284  *   Table id to use.
3285  * @param[in] egress
3286  *   Direction of the table.
3287  * @param[in] transfer
3288  *   E-Switch or NIC flow.
3289  * @param[out] error
3290  *   pointer to error structure.
3291  *
3292  * @return
3293  *   Returns tables resource based on the index, NULL in case of failed.
3294  */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
			 uint32_t table_id, uint8_t egress,
			 uint8_t transfer,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_tbl_resource *tbl;

#ifdef HAVE_MLX5DV_DR
	/*
	 * With direct-rules support, lazily create the DR table object
	 * in the proper domain (FDB for transfer, TX for egress, RX
	 * otherwise) and take a reference on it.
	 */
	if (transfer) {
		tbl = &sh->fdb_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->fdb_domain, table_id);
	} else if (egress) {
		tbl = &sh->tx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->tx_domain, table_id);
	} else {
		tbl = &sh->rx_tbl[table_id];
		if (!tbl->obj)
			tbl->obj = mlx5_glue->dr_create_flow_tbl
				(sh->rx_domain, table_id);
	}
	if (!tbl->obj) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, "cannot create table");
		return NULL;
	}
	rte_atomic32_inc(&tbl->refcnt);
	return tbl;
#else
	/*
	 * Without DR there is no table object to create or refcount:
	 * just hand back the pre-existing slot; this path cannot fail.
	 */
	(void)error;
	(void)tbl;
	if (transfer)
		return &sh->fdb_tbl[table_id];
	else if (egress)
		return &sh->tx_tbl[table_id];
	else
		return &sh->rx_tbl[table_id];
#endif
}
3341
3342 /**
3343  * Release a flow table.
3344  *
3345  * @param[in] tbl
3346  *   Table resource to be released.
3347  *
3348  * @return
3349  *   Returns 0 if table was released, else return 1;
3350  */
3351 static int
3352 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3353 {
3354         if (!tbl)
3355                 return 0;
3356         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3357                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3358                 tbl->obj = NULL;
3359                 return 0;
3360         }
3361         return 1;
3362 }
3363
3364 /**
3365  * Register the flow matcher.
3366  *
3367  * @param dev[in, out]
3368  *   Pointer to rte_eth_dev structure.
3369  * @param[in, out] matcher
3370  *   Pointer to flow matcher.
3371  * @parm[in, out] dev_flow
3372  *   Pointer to the dev_flow.
3373  * @param[out] error
3374  *   pointer to error structure.
3375  *
3376  * @return
3377  *   0 on success otherwise -errno and errno is set.
3378  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_matcher *matcher,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_matcher *cache_matcher;
	struct mlx5dv_flow_matcher_attr dv_attr = {
		.type = IBV_FLOW_ATTR_NORMAL,
		.match_mask = (void *)&matcher->mask,
	};
	struct mlx5_flow_tbl_resource *tbl = NULL;

	/*
	 * Lookup from cache: reuse an existing matcher when every
	 * distinguishing attribute (CRC of the mask, priority,
	 * direction, group, transfer flag and the mask bytes
	 * themselves) matches; just bump its refcount.
	 */
	LIST_FOREACH(cache_matcher, &sh->matchers, next) {
		if (matcher->crc == cache_matcher->crc &&
		    matcher->priority == cache_matcher->priority &&
		    matcher->egress == cache_matcher->egress &&
		    matcher->group == cache_matcher->group &&
		    matcher->transfer == cache_matcher->transfer &&
		    !memcmp((const void *)matcher->mask.buf,
			    (const void *)cache_matcher->mask.buf,
			    cache_matcher->mask.size)) {
			DRV_LOG(DEBUG,
				"priority %hd use %s matcher %p: refcnt %d++",
				cache_matcher->priority,
				cache_matcher->egress ? "tx" : "rx",
				(void *)cache_matcher,
				rte_atomic32_read(&cache_matcher->refcnt));
			rte_atomic32_inc(&cache_matcher->refcnt);
			dev_flow->dv.matcher = cache_matcher;
			return 0;
		}
	}
	/* Register new matcher. */
	cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
	if (!cache_matcher)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate matcher memory");
	/* Fetch (or create) the flow table the matcher attaches to. */
	tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
				       matcher->egress, matcher->transfer,
				       error);
	if (!tbl) {
		rte_free(cache_matcher);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create table");
	}
	*cache_matcher = *matcher;
	dv_attr.match_criteria_enable =
		flow_dv_matcher_enable(cache_matcher->mask.buf);
	dv_attr.priority = matcher->priority;
	if (matcher->egress)
		dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
	cache_matcher->matcher_object =
		mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
	if (!cache_matcher->matcher_object) {
		rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
		/* Only DR builds hold a table reference to drop here. */
		flow_dv_tbl_resource_release(tbl);
#endif
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create matcher");
	}
	rte_atomic32_inc(&cache_matcher->refcnt);
	LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
	dev_flow->dv.matcher = cache_matcher;
	DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
		cache_matcher->priority,
		cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
		rte_atomic32_read(&cache_matcher->refcnt));
	/* The cached matcher keeps an extra reference on its table. */
	rte_atomic32_inc(&tbl->refcnt);
	return 0;
}
3457
3458 /**
3459  * Find existing tag resource or create and register a new one.
3460  *
3461  * @param dev[in, out]
3462  *   Pointer to rte_eth_dev structure.
3463  * @param[in, out] resource
3464  *   Pointer to tag resource.
3465  * @parm[in, out] dev_flow
3466  *   Pointer to the dev_flow.
3467  * @param[out] error
3468  *   pointer to error structure.
3469  *
3470  * @return
3471  *   0 on success otherwise -errno and errno is set.
3472  */
static int
flow_dv_tag_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_tag_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_tag_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->tags, next) {
		if (resource->tag == cache_resource->tag) {
			DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Cache hit: share the existing action. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->flow->tag_resource = cache_resource;
			return 0;
		}
	}
	/* Register new resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the DV tag action for this tag value. */
	cache_resource->action = mlx5_glue->dv_create_flow_action_tag
		(resource->tag);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
	dev_flow->flow->tag_resource = cache_resource;
	DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
3519
3520 /**
3521  * Release the tag.
3522  *
3523  * @param dev
3524  *   Pointer to Ethernet device.
3525  * @param tag
3526  *   Pointer to the tag resource to release.
3527  *
3528  * @return
3529  *   1 while a reference on it exists, 0 when freed.
3530  */
3531 static int
3532 flow_dv_tag_release(struct rte_eth_dev *dev,
3533                     struct mlx5_flow_dv_tag_resource *tag)
3534 {
3535         assert(tag);
3536         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3537                 dev->data->port_id, (void *)tag,
3538                 rte_atomic32_read(&tag->refcnt));
3539         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3540                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3541                 LIST_REMOVE(tag, next);
3542                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3543                         dev->data->port_id, (void *)tag);
3544                 rte_free(tag);
3545                 return 0;
3546         }
3547         return 1;
3548 }
3549
3550 /**
3551  * Translate port ID action to vport.
3552  *
3553  * @param[in] dev
3554  *   Pointer to rte_eth_dev structure.
3555  * @param[in] action
3556  *   Pointer to the port ID action.
3557  * @param[out] dst_port_id
3558  *   The target port ID.
3559  * @param[out] error
3560  *   Pointer to the error structure.
3561  *
3562  * @return
3563  *   0 on success, a negative errno value otherwise and rte_errno is set.
3564  */
3565 static int
3566 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3567                                  const struct rte_flow_action *action,
3568                                  uint32_t *dst_port_id,
3569                                  struct rte_flow_error *error)
3570 {
3571         uint32_t port;
3572         uint16_t port_id;
3573         int ret;
3574         const struct rte_flow_action_port_id *conf =
3575                         (const struct rte_flow_action_port_id *)action->conf;
3576
3577         port = conf->original ? dev->data->port_id : conf->id;
3578         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3579         if (ret)
3580                 return rte_flow_error_set(error, -ret,
3581                                           RTE_FLOW_ERROR_TYPE_ACTION,
3582                                           NULL,
3583                                           "No eswitch info was found for port");
3584         *dst_port_id = port_id;
3585         return 0;
3586 }
3587
3588 /**
3589  * Fill the flow with DV spec.
3590  *
3591  * @param[in] dev
3592  *   Pointer to rte_eth_dev structure.
3593  * @param[in, out] dev_flow
3594  *   Pointer to the sub flow.
3595  * @param[in] attr
3596  *   Pointer to the flow attributes.
3597  * @param[in] items
3598  *   Pointer to the list of items.
3599  * @param[in] actions
3600  *   Pointer to the list of actions.
3601  * @param[out] error
3602  *   Pointer to the error structure.
3603  *
3604  * @return
3605  *   0 on success, a negative errno value otherwise and rte_errno is set.
3606  */
3607 static int
3608 flow_dv_translate(struct rte_eth_dev *dev,
3609                   struct mlx5_flow *dev_flow,
3610                   const struct rte_flow_attr *attr,
3611                   const struct rte_flow_item items[],
3612                   const struct rte_flow_action actions[],
3613                   struct rte_flow_error *error)
3614 {
3615         struct mlx5_priv *priv = dev->data->dev_private;
3616         struct rte_flow *flow = dev_flow->flow;
3617         uint64_t item_flags = 0;
3618         uint64_t last_item = 0;
3619         uint64_t action_flags = 0;
3620         uint64_t priority = attr->priority;
3621         struct mlx5_flow_dv_matcher matcher = {
3622                 .mask = {
3623                         .size = sizeof(matcher.mask.buf),
3624                 },
3625         };
3626         int actions_n = 0;
3627         bool actions_end = false;
3628         struct mlx5_flow_dv_modify_hdr_resource res = {
3629                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3630                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3631         };
3632         union flow_dv_attr flow_attr = { .attr = 0 };
3633         struct mlx5_flow_dv_tag_resource tag_resource;
3634         uint32_t modify_action_position = UINT32_MAX;
3635         void *match_mask = matcher.mask.buf;
3636         void *match_value = dev_flow->dv.value.buf;
3637
3638         flow->group = attr->group;
3639         if (attr->transfer)
3640                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3641         if (priority == MLX5_FLOW_PRIO_RSVD)
3642                 priority = priv->config.flow_prio - 1;
3643         for (; !actions_end ; actions++) {
3644                 const struct rte_flow_action_queue *queue;
3645                 const struct rte_flow_action_rss *rss;
3646                 const struct rte_flow_action *action = actions;
3647                 const struct rte_flow_action_count *count = action->conf;
3648                 const uint8_t *rss_key;
3649                 const struct rte_flow_action_jump *jump_data;
3650                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3651                 struct mlx5_flow_tbl_resource *tbl;
3652                 uint32_t port_id = 0;
3653                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3654
3655                 switch (actions->type) {
3656                 case RTE_FLOW_ACTION_TYPE_VOID:
3657                         break;
3658                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3659                         if (flow_dv_translate_action_port_id(dev, action,
3660                                                              &port_id, error))
3661                                 return -rte_errno;
3662                         port_id_resource.port_id = port_id;
3663                         if (flow_dv_port_id_action_resource_register
3664                             (dev, &port_id_resource, dev_flow, error))
3665                                 return -rte_errno;
3666                         dev_flow->dv.actions[actions_n++] =
3667                                 dev_flow->dv.port_id_action->action;
3668                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3669                         break;
3670                 case RTE_FLOW_ACTION_TYPE_FLAG:
3671                         tag_resource.tag =
3672                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3673                         if (!flow->tag_resource)
3674                                 if (flow_dv_tag_resource_register
3675                                     (dev, &tag_resource, dev_flow, error))
3676                                         return errno;
3677                         dev_flow->dv.actions[actions_n++] =
3678                                 flow->tag_resource->action;
3679                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3680                         break;
3681                 case RTE_FLOW_ACTION_TYPE_MARK:
3682                         tag_resource.tag = mlx5_flow_mark_set
3683                               (((const struct rte_flow_action_mark *)
3684                                (actions->conf))->id);
3685                         if (!flow->tag_resource)
3686                                 if (flow_dv_tag_resource_register
3687                                     (dev, &tag_resource, dev_flow, error))
3688                                         return errno;
3689                         dev_flow->dv.actions[actions_n++] =
3690                                 flow->tag_resource->action;
3691                         action_flags |= MLX5_FLOW_ACTION_MARK;
3692                         break;
3693                 case RTE_FLOW_ACTION_TYPE_DROP:
3694                         action_flags |= MLX5_FLOW_ACTION_DROP;
3695                         break;
3696                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3697                         queue = actions->conf;
3698                         flow->rss.queue_num = 1;
3699                         (*flow->queue)[0] = queue->index;
3700                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3701                         break;
3702                 case RTE_FLOW_ACTION_TYPE_RSS:
3703                         rss = actions->conf;
3704                         if (flow->queue)
3705                                 memcpy((*flow->queue), rss->queue,
3706                                        rss->queue_num * sizeof(uint16_t));
3707                         flow->rss.queue_num = rss->queue_num;
3708                         /* NULL RSS key indicates default RSS key. */
3709                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3710                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3711                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3712                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3713                         flow->rss.level = rss->level;
3714                         action_flags |= MLX5_FLOW_ACTION_RSS;
3715                         break;
3716                 case RTE_FLOW_ACTION_TYPE_COUNT:
3717                         if (!priv->config.devx) {
3718                                 rte_errno = ENOTSUP;
3719                                 goto cnt_err;
3720                         }
3721                         flow->counter = flow_dv_counter_new(dev, count->shared,
3722                                                             count->id);
3723                         if (flow->counter == NULL)
3724                                 goto cnt_err;
3725                         dev_flow->dv.actions[actions_n++] =
3726                                 flow->counter->action;
3727                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3728                         break;
3729 cnt_err:
3730                         if (rte_errno == ENOTSUP)
3731                                 return rte_flow_error_set
3732                                               (error, ENOTSUP,
3733                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3734                                                NULL,
3735                                                "count action not supported");
3736                         else
3737                                 return rte_flow_error_set
3738                                                 (error, rte_errno,
3739                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3740                                                  action,
3741                                                  "cannot create counter"
3742                                                   " object.");
3743                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3744                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3745                         if (flow_dv_create_action_l2_encap(dev, actions,
3746                                                            dev_flow,
3747                                                            attr->transfer,
3748                                                            error))
3749                                 return -rte_errno;
3750                         dev_flow->dv.actions[actions_n++] =
3751                                 dev_flow->dv.encap_decap->verbs_action;
3752                         action_flags |= actions->type ==
3753                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3754                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3755                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3756                         break;
3757                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3758                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3759                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3760                                                            attr->transfer,
3761                                                            error))
3762                                 return -rte_errno;
3763                         dev_flow->dv.actions[actions_n++] =
3764                                 dev_flow->dv.encap_decap->verbs_action;
3765                         action_flags |= actions->type ==
3766                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3767                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3768                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3769                         break;
3770                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3771                         /* Handle encap with preceding decap. */
3772                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3773                                 if (flow_dv_create_action_raw_encap
3774                                         (dev, actions, dev_flow, attr, error))
3775                                         return -rte_errno;
3776                                 dev_flow->dv.actions[actions_n++] =
3777                                         dev_flow->dv.encap_decap->verbs_action;
3778                         } else {
3779                                 /* Handle encap without preceding decap. */
3780                                 if (flow_dv_create_action_l2_encap
3781                                     (dev, actions, dev_flow, attr->transfer,
3782                                      error))
3783                                         return -rte_errno;
3784                                 dev_flow->dv.actions[actions_n++] =
3785                                         dev_flow->dv.encap_decap->verbs_action;
3786                         }
3787                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3788                         break;
3789                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3790                         /* Check if this decap is followed by encap. */
3791                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3792                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3793                                action++) {
3794                         }
3795                         /* Handle decap only if it isn't followed by encap. */
3796                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3797                                 if (flow_dv_create_action_l2_decap
3798                                     (dev, dev_flow, attr->transfer, error))
3799                                         return -rte_errno;
3800                                 dev_flow->dv.actions[actions_n++] =
3801                                         dev_flow->dv.encap_decap->verbs_action;
3802                         }
3803                         /* If decap is followed by encap, handle it at encap. */
3804                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3805                         break;
3806                 case RTE_FLOW_ACTION_TYPE_JUMP:
3807                         jump_data = action->conf;
3808                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3809                                                        MLX5_GROUP_FACTOR,
3810                                                        attr->egress,
3811                                                        attr->transfer, error);
3812                         if (!tbl)
3813                                 return rte_flow_error_set
3814                                                 (error, errno,
3815                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3816                                                  NULL,
3817                                                  "cannot create jump action.");
3818                         jump_tbl_resource.tbl = tbl;
3819                         if (flow_dv_jump_tbl_resource_register
3820                             (dev, &jump_tbl_resource, dev_flow, error)) {
3821                                 flow_dv_tbl_resource_release(tbl);
3822                                 return rte_flow_error_set
3823                                                 (error, errno,
3824                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3825                                                  NULL,
3826                                                  "cannot create jump action.");
3827                         }
3828                         dev_flow->dv.actions[actions_n++] =
3829                                 dev_flow->dv.jump->action;
3830                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3831                         break;
3832                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3833                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3834                         if (flow_dv_convert_action_modify_mac(&res, actions,
3835                                                               error))
3836                                 return -rte_errno;
3837                         action_flags |= actions->type ==
3838                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3839                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3840                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3841                         break;
3842                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3843                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3844                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3845                                                                error))
3846                                 return -rte_errno;
3847                         action_flags |= actions->type ==
3848                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3849                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3850                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3851                         break;
3852                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3853                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3854                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3855                                                                error))
3856                                 return -rte_errno;
3857                         action_flags |= actions->type ==
3858                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3859                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3860                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3861                         break;
3862                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3863                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3864                         if (flow_dv_convert_action_modify_tp(&res, actions,
3865                                                              items, &flow_attr,
3866                                                              error))
3867                                 return -rte_errno;
3868                         action_flags |= actions->type ==
3869                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3870                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3871                                         MLX5_FLOW_ACTION_SET_TP_DST;
3872                         break;
3873                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3874                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3875                                                                   &flow_attr,
3876                                                                   error))
3877                                 return -rte_errno;
3878                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3879                         break;
3880                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3881                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3882                                                              items, &flow_attr,
3883                                                              error))
3884                                 return -rte_errno;
3885                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3886                         break;
3887                 case RTE_FLOW_ACTION_TYPE_END:
3888                         actions_end = true;
3889                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3890                                 /* create modify action if needed. */
3891                                 if (flow_dv_modify_hdr_resource_register
3892                                                                 (dev, &res,
3893                                                                  dev_flow,
3894                                                                  error))
3895                                         return -rte_errno;
3896                                 dev_flow->dv.actions[modify_action_position] =
3897                                         dev_flow->dv.modify_hdr->verbs_action;
3898                         }
3899                         break;
3900                 default:
3901                         break;
3902                 }
3903                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3904                     modify_action_position == UINT32_MAX)
3905                         modify_action_position = actions_n++;
3906         }
3907         dev_flow->dv.actions_n = actions_n;
3908         flow->actions = action_flags;
3909         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3910                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3911
3912                 switch (items->type) {
3913                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3914                         flow_dv_translate_item_port_id(dev, match_mask,
3915                                                        match_value, items);
3916                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3917                         break;
3918                 case RTE_FLOW_ITEM_TYPE_ETH:
3919                         flow_dv_translate_item_eth(match_mask, match_value,
3920                                                    items, tunnel);
3921                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3922                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3923                                              MLX5_FLOW_LAYER_OUTER_L2;
3924                         break;
3925                 case RTE_FLOW_ITEM_TYPE_VLAN:
3926                         flow_dv_translate_item_vlan(match_mask, match_value,
3927                                                     items, tunnel);
3928                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3929                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3930                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3931                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3932                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3933                         break;
3934                 case RTE_FLOW_ITEM_TYPE_IPV4:
3935                         flow_dv_translate_item_ipv4(match_mask, match_value,
3936                                                     items, tunnel, attr->group);
3937                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3938                         dev_flow->dv.hash_fields |=
3939                                 mlx5_flow_hashfields_adjust
3940                                         (dev_flow, tunnel,
3941                                          MLX5_IPV4_LAYER_TYPES,
3942                                          MLX5_IPV4_IBV_RX_HASH);
3943                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3944                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3945                         break;
3946                 case RTE_FLOW_ITEM_TYPE_IPV6:
3947                         flow_dv_translate_item_ipv6(match_mask, match_value,
3948                                                     items, tunnel, attr->group);
3949                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3950                         dev_flow->dv.hash_fields |=
3951                                 mlx5_flow_hashfields_adjust
3952                                         (dev_flow, tunnel,
3953                                          MLX5_IPV6_LAYER_TYPES,
3954                                          MLX5_IPV6_IBV_RX_HASH);
3955                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3956                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3957                         break;
3958                 case RTE_FLOW_ITEM_TYPE_TCP:
3959                         flow_dv_translate_item_tcp(match_mask, match_value,
3960                                                    items, tunnel);
3961                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3962                         dev_flow->dv.hash_fields |=
3963                                 mlx5_flow_hashfields_adjust
3964                                         (dev_flow, tunnel, ETH_RSS_TCP,
3965                                          IBV_RX_HASH_SRC_PORT_TCP |
3966                                          IBV_RX_HASH_DST_PORT_TCP);
3967                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3968                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3969                         break;
3970                 case RTE_FLOW_ITEM_TYPE_UDP:
3971                         flow_dv_translate_item_udp(match_mask, match_value,
3972                                                    items, tunnel);
3973                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3974                         dev_flow->dv.hash_fields |=
3975                                 mlx5_flow_hashfields_adjust
3976                                         (dev_flow, tunnel, ETH_RSS_UDP,
3977                                          IBV_RX_HASH_SRC_PORT_UDP |
3978                                          IBV_RX_HASH_DST_PORT_UDP);
3979                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3980                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3981                         break;
3982                 case RTE_FLOW_ITEM_TYPE_GRE:
3983                         flow_dv_translate_item_gre(match_mask, match_value,
3984                                                    items, tunnel);
3985                         last_item = MLX5_FLOW_LAYER_GRE;
3986                         break;
3987                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3988                         flow_dv_translate_item_nvgre(match_mask, match_value,
3989                                                      items, tunnel);
3990                         last_item = MLX5_FLOW_LAYER_GRE;
3991                         break;
3992                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3993                         flow_dv_translate_item_vxlan(match_mask, match_value,
3994                                                      items, tunnel);
3995                         last_item = MLX5_FLOW_LAYER_VXLAN;
3996                         break;
3997                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3998                         flow_dv_translate_item_vxlan(match_mask, match_value,
3999                                                      items, tunnel);
4000                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4001                         break;
4002                 case RTE_FLOW_ITEM_TYPE_MPLS:
4003                         flow_dv_translate_item_mpls(match_mask, match_value,
4004                                                     items, last_item, tunnel);
4005                         last_item = MLX5_FLOW_LAYER_MPLS;
4006                         break;
4007                 case RTE_FLOW_ITEM_TYPE_META:
4008                         flow_dv_translate_item_meta(match_mask, match_value,
4009                                                     items);
4010                         last_item = MLX5_FLOW_ITEM_METADATA;
4011                         break;
4012                 default:
4013                         break;
4014                 }
4015                 item_flags |= last_item;
4016         }
4017         /*
4018          * In case of ingress traffic when E-Switch mode is enabled,
4019          * we have two cases where we need to set the source port manually.
4020          * The first one, is in case of Nic steering rule, and the second is
4021          * E-Switch rule where no port_id item was found. In both cases
4022          * the source port is set according the current port in use.
4023          */
4024         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4025             (priv->representor || priv->master)) {
4026                 if (flow_dv_translate_item_port_id(dev, match_mask,
4027                                                    match_value, NULL))
4028                         return -rte_errno;
4029         }
4030         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4031                                          dev_flow->dv.value.buf));
4032         dev_flow->layers = item_flags;
4033         /* Register matcher. */
4034         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4035                                     matcher.mask.size);
4036         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4037                                                      matcher.priority);
4038         matcher.egress = attr->egress;
4039         matcher.group = attr->group;
4040         matcher.transfer = attr->transfer;
4041         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4042                 return -rte_errno;
4043         return 0;
4044 }
4045
/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		/*
		 * Append the fate action (drop, or queue/RSS hash Rx queue)
		 * after the actions already translated for this device flow.
		 */
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (flow->transfer) {
				/* E-Switch rules share a single drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			/* Reuse a matching hash Rx queue if one exists. */
			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Roll back hash Rx queue references taken by earlier iterations. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
4141
4142 /**
4143  * Release the flow matcher.
4144  *
4145  * @param dev
4146  *   Pointer to Ethernet device.
4147  * @param flow
4148  *   Pointer to mlx5_flow.
4149  *
4150  * @return
4151  *   1 while a reference on it exists, 0 when freed.
4152  */
4153 static int
4154 flow_dv_matcher_release(struct rte_eth_dev *dev,
4155                         struct mlx5_flow *flow)
4156 {
4157         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4158         struct mlx5_priv *priv = dev->data->dev_private;
4159         struct mlx5_ibv_shared *sh = priv->sh;
4160         struct mlx5_flow_tbl_resource *tbl;
4161
4162         assert(matcher->matcher_object);
4163         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4164                 dev->data->port_id, (void *)matcher,
4165                 rte_atomic32_read(&matcher->refcnt));
4166         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4167                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4168                            (matcher->matcher_object));
4169                 LIST_REMOVE(matcher, next);
4170                 if (matcher->egress)
4171                         tbl = &sh->tx_tbl[matcher->group];
4172                 else
4173                         tbl = &sh->rx_tbl[matcher->group];
4174                 flow_dv_tbl_resource_release(tbl);
4175                 rte_free(matcher);
4176                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4177                         dev->data->port_id, (void *)matcher);
4178                 return 0;
4179         }
4180         return 1;
4181 }
4182
4183 /**
4184  * Release an encap/decap resource.
4185  *
4186  * @param flow
4187  *   Pointer to mlx5_flow.
4188  *
4189  * @return
4190  *   1 while a reference on it exists, 0 when freed.
4191  */
4192 static int
4193 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4194 {
4195         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4196                                                 flow->dv.encap_decap;
4197
4198         assert(cache_resource->verbs_action);
4199         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4200                 (void *)cache_resource,
4201                 rte_atomic32_read(&cache_resource->refcnt));
4202         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4203                 claim_zero(mlx5_glue->destroy_flow_action
4204                                 (cache_resource->verbs_action));
4205                 LIST_REMOVE(cache_resource, next);
4206                 rte_free(cache_resource);
4207                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4208                         (void *)cache_resource);
4209                 return 0;
4210         }
4211         return 1;
4212 }
4213
4214 /**
4215  * Release an jump to table action resource.
4216  *
4217  * @param flow
4218  *   Pointer to mlx5_flow.
4219  *
4220  * @return
4221  *   1 while a reference on it exists, 0 when freed.
4222  */
4223 static int
4224 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4225 {
4226         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4227                                                 flow->dv.jump;
4228
4229         assert(cache_resource->action);
4230         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4231                 (void *)cache_resource,
4232                 rte_atomic32_read(&cache_resource->refcnt));
4233         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4234                 claim_zero(mlx5_glue->destroy_flow_action
4235                                 (cache_resource->action));
4236                 LIST_REMOVE(cache_resource, next);
4237                 flow_dv_tbl_resource_release(cache_resource->tbl);
4238                 rte_free(cache_resource);
4239                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4240                         (void *)cache_resource);
4241                 return 0;
4242         }
4243         return 1;
4244 }
4245
4246 /**
4247  * Release a modify-header resource.
4248  *
4249  * @param flow
4250  *   Pointer to mlx5_flow.
4251  *
4252  * @return
4253  *   1 while a reference on it exists, 0 when freed.
4254  */
4255 static int
4256 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4257 {
4258         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4259                                                 flow->dv.modify_hdr;
4260
4261         assert(cache_resource->verbs_action);
4262         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4263                 (void *)cache_resource,
4264                 rte_atomic32_read(&cache_resource->refcnt));
4265         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4266                 claim_zero(mlx5_glue->destroy_flow_action
4267                                 (cache_resource->verbs_action));
4268                 LIST_REMOVE(cache_resource, next);
4269                 rte_free(cache_resource);
4270                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4271                         (void *)cache_resource);
4272                 return 0;
4273         }
4274         return 1;
4275 }
4276
4277 /**
4278  * Release port ID action resource.
4279  *
4280  * @param flow
4281  *   Pointer to mlx5_flow.
4282  *
4283  * @return
4284  *   1 while a reference on it exists, 0 when freed.
4285  */
4286 static int
4287 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4288 {
4289         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4290                 flow->dv.port_id_action;
4291
4292         assert(cache_resource->action);
4293         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4294                 (void *)cache_resource,
4295                 rte_atomic32_read(&cache_resource->refcnt));
4296         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4297                 claim_zero(mlx5_glue->destroy_flow_action
4298                                 (cache_resource->action));
4299                 LIST_REMOVE(cache_resource, next);
4300                 rte_free(cache_resource);
4301                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4302                         (void *)cache_resource);
4303                 return 0;
4304         }
4305         return 1;
4306 }
4307
4308 /**
4309  * Remove the flow from the NIC but keeps it in memory.
4310  *
4311  * @param[in] dev
4312  *   Pointer to Ethernet device.
4313  * @param[in, out] flow
4314  *   Pointer to flow structure.
4315  */
4316 static void
4317 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4318 {
4319         struct mlx5_flow_dv *dv;
4320         struct mlx5_flow *dev_flow;
4321
4322         if (!flow)
4323                 return;
4324         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4325                 dv = &dev_flow->dv;
4326                 if (dv->flow) {
4327                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4328                         dv->flow = NULL;
4329                 }
4330                 if (dv->hrxq) {
4331                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4332                                 mlx5_hrxq_drop_release(dev);
4333                         else
4334                                 mlx5_hrxq_release(dev, dv->hrxq);
4335                         dv->hrxq = NULL;
4336                 }
4337         }
4338 }
4339
4340 /**
4341  * Remove the flow from the NIC and the memory.
4342  *
4343  * @param[in] dev
4344  *   Pointer to the Ethernet device structure.
4345  * @param[in, out] flow
4346  *   Pointer to flow structure.
4347  */
4348 static void
4349 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4350 {
4351         struct mlx5_flow *dev_flow;
4352
4353         if (!flow)
4354                 return;
4355         flow_dv_remove(dev, flow);
4356         if (flow->counter) {
4357                 flow_dv_counter_release(flow->counter);
4358                 flow->counter = NULL;
4359         }
4360         if (flow->tag_resource) {
4361                 flow_dv_tag_release(dev, flow->tag_resource);
4362                 flow->tag_resource = NULL;
4363         }
4364         while (!LIST_EMPTY(&flow->dev_flows)) {
4365                 dev_flow = LIST_FIRST(&flow->dev_flows);
4366                 LIST_REMOVE(dev_flow, next);
4367                 if (dev_flow->dv.matcher)
4368                         flow_dv_matcher_release(dev, dev_flow);
4369                 if (dev_flow->dv.encap_decap)
4370                         flow_dv_encap_decap_resource_release(dev_flow);
4371                 if (dev_flow->dv.modify_hdr)
4372                         flow_dv_modify_hdr_resource_release(dev_flow);
4373                 if (dev_flow->dv.jump)
4374                         flow_dv_jump_tbl_resource_release(dev_flow);
4375                 if (dev_flow->dv.port_id_action)
4376                         flow_dv_port_id_action_resource_release(dev_flow);
4377                 rte_free(dev_flow);
4378         }
4379 }
4380
4381 /**
4382  * Query a dv flow  rule for its statistics via devx.
4383  *
4384  * @param[in] dev
4385  *   Pointer to Ethernet device.
4386  * @param[in] flow
4387  *   Pointer to the sub flow.
4388  * @param[out] data
4389  *   data retrieved by the query.
4390  * @param[out] error
4391  *   Perform verbose error reporting if not NULL.
4392  *
4393  * @return
4394  *   0 on success, a negative errno value otherwise and rte_errno is set.
4395  */
4396 static int
4397 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4398                     void *data, struct rte_flow_error *error)
4399 {
4400         struct mlx5_priv *priv = dev->data->dev_private;
4401         struct rte_flow_query_count *qc = data;
4402         uint64_t pkts = 0;
4403         uint64_t bytes = 0;
4404         int err;
4405
4406         if (!priv->config.devx)
4407                 return rte_flow_error_set(error, ENOTSUP,
4408                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4409                                           NULL,
4410                                           "counters are not supported");
4411         if (flow->counter) {
4412                 err = mlx5_devx_cmd_flow_counter_query
4413                                                 (flow->counter->dcs,
4414                                                  qc->reset, &pkts, &bytes);
4415                 if (err)
4416                         return rte_flow_error_set
4417                                 (error, err,
4418                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4419                                  NULL,
4420                                  "cannot read counters");
4421                 qc->hits_set = 1;
4422                 qc->bytes_set = 1;
4423                 qc->hits = pkts - flow->counter->hits;
4424                 qc->bytes = bytes - flow->counter->bytes;
4425                 if (qc->reset) {
4426                         flow->counter->hits = pkts;
4427                         flow->counter->bytes = bytes;
4428                 }
4429                 return 0;
4430         }
4431         return rte_flow_error_set(error, EINVAL,
4432                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4433                                   NULL,
4434                                   "counters are not available");
4435 }
4436
4437 /**
4438  * Query a flow.
4439  *
4440  * @see rte_flow_query()
4441  * @see rte_flow_ops
4442  */
4443 static int
4444 flow_dv_query(struct rte_eth_dev *dev,
4445               struct rte_flow *flow __rte_unused,
4446               const struct rte_flow_action *actions __rte_unused,
4447               void *data __rte_unused,
4448               struct rte_flow_error *error __rte_unused)
4449 {
4450         int ret = -EINVAL;
4451
4452         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4453                 switch (actions->type) {
4454                 case RTE_FLOW_ACTION_TYPE_VOID:
4455                         break;
4456                 case RTE_FLOW_ACTION_TYPE_COUNT:
4457                         ret = flow_dv_query_count(dev, flow, data, error);
4458                         break;
4459                 default:
4460                         return rte_flow_error_set(error, ENOTSUP,
4461                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4462                                                   actions,
4463                                                   "action not supported");
4464                 }
4465         }
4466         return ret;
4467 }
4468
4469 /*
4470  * Mutex-protected thunk to flow_dv_translate().
4471  */
4472 static int
4473 flow_d_translate(struct rte_eth_dev *dev,
4474                  struct mlx5_flow *dev_flow,
4475                  const struct rte_flow_attr *attr,
4476                  const struct rte_flow_item items[],
4477                  const struct rte_flow_action actions[],
4478                  struct rte_flow_error *error)
4479 {
4480         int ret;
4481
4482         flow_d_shared_lock(dev);
4483         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4484         flow_d_shared_unlock(dev);
4485         return ret;
4486 }
4487
/*
 * Mutex-protected thunk to flow_dv_apply(): takes the shared context lock
 * around the HW rule creation.
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int rc;

	flow_d_shared_lock(dev);
	rc = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return rc;
}
4503
/*
 * Mutex-protected thunk to flow_dv_remove(): takes the shared context lock
 * around the HW rule removal.
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
4514
/*
 * Mutex-protected thunk to flow_dv_destroy(): takes the shared context lock
 * around the full flow teardown.
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
4525
/*
 * Flow driver callbacks for the Direct Verbs (DV) engine.
 * The translate/apply/remove/destroy entries go through the
 * flow_d_shared_lock()-protected flow_d_* thunks; validate, prepare and
 * query call the flow_dv_* handlers directly.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
4535
4536 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */