net/mlx5: fix E-Switch flow without port item
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
44 #endif
45
46 #ifndef HAVE_MLX5DV_DR
47 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
48 #endif
49
/*
 * Protocol summary of a flow pattern, filled by flow_dv_attr_init()
 * on first use (see the 'valid' flag) and consumed by the
 * modify-header action conversion helpers below.
 */
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the flags below are filled in. */
		uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
		uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
		uint32_t tcp:1; /* Pattern contains a TCP item. */
		uint32_t udp:1; /* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* All flags as one word. */
};
61
62 /**
63  * Initialize flow attributes structure according to flow items' types.
64  *
65  * @param[in] item
66  *   Pointer to item specification.
67  * @param[out] attr
68  *   Pointer to flow attributes structure.
69  */
70 static void
71 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
72 {
73         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
74                 switch (item->type) {
75                 case RTE_FLOW_ITEM_TYPE_IPV4:
76                         attr->ipv4 = 1;
77                         break;
78                 case RTE_FLOW_ITEM_TYPE_IPV6:
79                         attr->ipv6 = 1;
80                         break;
81                 case RTE_FLOW_ITEM_TYPE_UDP:
82                         attr->udp = 1;
83                         break;
84                 case RTE_FLOW_ITEM_TYPE_TCP:
85                         attr->tcp = 1;
86                         break;
87                 default:
88                         break;
89                 }
90         }
91         attr->valid = 1;
92 }
93
/*
 * Descriptor of one modifiable protocol header field. Arrays of these
 * are terminated by an entry with size == 0 (see
 * flow_dv_convert_modify_action() loop condition).
 */
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Hardware field identifier. */
};
99
/* Modifiable Ethernet header fields: destination and source MAC. */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0}, /* End marker (size == 0). */
};
107
/* Modifiable IPv4 header fields: TTL and source/destination address. */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0}, /* End marker (size == 0). */
};
114
/*
 * Modifiable IPv6 header fields: hop limit and the 128-bit
 * source/destination addresses split into 32-bit segments.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0}, /* End marker (size == 0). */
};
127
/* Modifiable UDP header fields: source and destination port. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0}, /* End marker (size == 0). */
};
133
/* Modifiable TCP header fields: source and destination port. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0}, /* End marker (size == 0). */
};
139
140 /**
141  * Acquire the synchronizing object to protect multithreaded access
142  * to shared dv context. Lock occurs only if context is actually
143  * shared, i.e. we have multiport IB device and representors are
144  * created.
145  *
146  * @param[in] dev
147  *   Pointer to the rte_eth_dev structure.
148  */
149 static void
150 flow_d_shared_lock(struct rte_eth_dev *dev)
151 {
152         struct mlx5_priv *priv = dev->data->dev_private;
153         struct mlx5_ibv_shared *sh = priv->sh;
154
155         if (sh->dv_refcnt > 1) {
156                 int ret;
157
158                 ret = pthread_mutex_lock(&sh->dv_mutex);
159                 assert(!ret);
160                 (void)ret;
161         }
162 }
163
164 static void
165 flow_d_shared_unlock(struct rte_eth_dev *dev)
166 {
167         struct mlx5_priv *priv = dev->data->dev_private;
168         struct mlx5_ibv_shared *sh = priv->sh;
169
170         if (sh->dv_refcnt > 1) {
171                 int ret;
172
173                 ret = pthread_mutex_unlock(&sh->dv_mutex);
174                 assert(!ret);
175                 (void)ret;
176         }
177 }
178
179 /**
180  * Convert modify-header action to DV specification.
181  *
182  * @param[in] item
183  *   Pointer to item specification.
184  * @param[in] field
185  *   Pointer to field modification information.
186  * @param[in,out] resource
187  *   Pointer to the modify-header resource.
188  * @param[in] type
189  *   Type of modification.
190  * @param[out] error
191  *   Pointer to the error structure.
192  *
193  * @return
194  *   0 on success, a negative errno value otherwise and rte_errno is set.
195  */
196 static int
197 flow_dv_convert_modify_action(struct rte_flow_item *item,
198                               struct field_modify_info *field,
199                               struct mlx5_flow_dv_modify_hdr_resource *resource,
200                               uint32_t type,
201                               struct rte_flow_error *error)
202 {
203         uint32_t i = resource->actions_num;
204         struct mlx5_modification_cmd *actions = resource->actions;
205         const uint8_t *spec = item->spec;
206         const uint8_t *mask = item->mask;
207         uint32_t set;
208
209         while (field->size) {
210                 set = 0;
211                 /* Generate modify command for each mask segment. */
212                 memcpy(&set, &mask[field->offset], field->size);
213                 if (set) {
214                         if (i >= MLX5_MODIFY_NUM)
215                                 return rte_flow_error_set(error, EINVAL,
216                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
217                                          "too many items to modify");
218                         actions[i].action_type = type;
219                         actions[i].field = field->id;
220                         actions[i].length = field->size ==
221                                         4 ? 0 : field->size * 8;
222                         rte_memcpy(&actions[i].data[4 - field->size],
223                                    &spec[field->offset], field->size);
224                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
225                         ++i;
226                 }
227                 if (resource->actions_num != i)
228                         resource->actions_num = i;
229                 field++;
230         }
231         if (!resource->actions_num)
232                 return rte_flow_error_set(error, EINVAL,
233                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
234                                           "invalid modification flow item");
235         return 0;
236 }
237
238 /**
239  * Convert modify-header set IPv4 address action to DV specification.
240  *
241  * @param[in,out] resource
242  *   Pointer to the modify-header resource.
243  * @param[in] action
244  *   Pointer to action specification.
245  * @param[out] error
246  *   Pointer to the error structure.
247  *
248  * @return
249  *   0 on success, a negative errno value otherwise and rte_errno is set.
250  */
251 static int
252 flow_dv_convert_action_modify_ipv4
253                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
254                          const struct rte_flow_action *action,
255                          struct rte_flow_error *error)
256 {
257         const struct rte_flow_action_set_ipv4 *conf =
258                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
259         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
260         struct rte_flow_item_ipv4 ipv4;
261         struct rte_flow_item_ipv4 ipv4_mask;
262
263         memset(&ipv4, 0, sizeof(ipv4));
264         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
265         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
266                 ipv4.hdr.src_addr = conf->ipv4_addr;
267                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
268         } else {
269                 ipv4.hdr.dst_addr = conf->ipv4_addr;
270                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
271         }
272         item.spec = &ipv4;
273         item.mask = &ipv4_mask;
274         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
275                                              MLX5_MODIFICATION_TYPE_SET, error);
276 }
277
278 /**
279  * Convert modify-header set IPv6 address action to DV specification.
280  *
281  * @param[in,out] resource
282  *   Pointer to the modify-header resource.
283  * @param[in] action
284  *   Pointer to action specification.
285  * @param[out] error
286  *   Pointer to the error structure.
287  *
288  * @return
289  *   0 on success, a negative errno value otherwise and rte_errno is set.
290  */
291 static int
292 flow_dv_convert_action_modify_ipv6
293                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
294                          const struct rte_flow_action *action,
295                          struct rte_flow_error *error)
296 {
297         const struct rte_flow_action_set_ipv6 *conf =
298                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
299         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
300         struct rte_flow_item_ipv6 ipv6;
301         struct rte_flow_item_ipv6 ipv6_mask;
302
303         memset(&ipv6, 0, sizeof(ipv6));
304         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
305         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
306                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
307                        sizeof(ipv6.hdr.src_addr));
308                 memcpy(&ipv6_mask.hdr.src_addr,
309                        &rte_flow_item_ipv6_mask.hdr.src_addr,
310                        sizeof(ipv6.hdr.src_addr));
311         } else {
312                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
313                        sizeof(ipv6.hdr.dst_addr));
314                 memcpy(&ipv6_mask.hdr.dst_addr,
315                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
316                        sizeof(ipv6.hdr.dst_addr));
317         }
318         item.spec = &ipv6;
319         item.mask = &ipv6_mask;
320         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
321                                              MLX5_MODIFICATION_TYPE_SET, error);
322 }
323
324 /**
325  * Convert modify-header set MAC address action to DV specification.
326  *
327  * @param[in,out] resource
328  *   Pointer to the modify-header resource.
329  * @param[in] action
330  *   Pointer to action specification.
331  * @param[out] error
332  *   Pointer to the error structure.
333  *
334  * @return
335  *   0 on success, a negative errno value otherwise and rte_errno is set.
336  */
337 static int
338 flow_dv_convert_action_modify_mac
339                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
340                          const struct rte_flow_action *action,
341                          struct rte_flow_error *error)
342 {
343         const struct rte_flow_action_set_mac *conf =
344                 (const struct rte_flow_action_set_mac *)(action->conf);
345         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
346         struct rte_flow_item_eth eth;
347         struct rte_flow_item_eth eth_mask;
348
349         memset(&eth, 0, sizeof(eth));
350         memset(&eth_mask, 0, sizeof(eth_mask));
351         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
352                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
353                        sizeof(eth.src.addr_bytes));
354                 memcpy(&eth_mask.src.addr_bytes,
355                        &rte_flow_item_eth_mask.src.addr_bytes,
356                        sizeof(eth_mask.src.addr_bytes));
357         } else {
358                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
359                        sizeof(eth.dst.addr_bytes));
360                 memcpy(&eth_mask.dst.addr_bytes,
361                        &rte_flow_item_eth_mask.dst.addr_bytes,
362                        sizeof(eth_mask.dst.addr_bytes));
363         }
364         item.spec = &eth;
365         item.mask = &eth_mask;
366         return flow_dv_convert_modify_action(&item, modify_eth, resource,
367                                              MLX5_MODIFICATION_TYPE_SET, error);
368 }
369
370 /**
371  * Convert modify-header set TP action to DV specification.
372  *
373  * @param[in,out] resource
374  *   Pointer to the modify-header resource.
375  * @param[in] action
376  *   Pointer to action specification.
377  * @param[in] items
378  *   Pointer to rte_flow_item objects list.
379  * @param[in] attr
380  *   Pointer to flow attributes structure.
381  * @param[out] error
382  *   Pointer to the error structure.
383  *
384  * @return
385  *   0 on success, a negative errno value otherwise and rte_errno is set.
386  */
387 static int
388 flow_dv_convert_action_modify_tp
389                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
390                          const struct rte_flow_action *action,
391                          const struct rte_flow_item *items,
392                          union flow_dv_attr *attr,
393                          struct rte_flow_error *error)
394 {
395         const struct rte_flow_action_set_tp *conf =
396                 (const struct rte_flow_action_set_tp *)(action->conf);
397         struct rte_flow_item item;
398         struct rte_flow_item_udp udp;
399         struct rte_flow_item_udp udp_mask;
400         struct rte_flow_item_tcp tcp;
401         struct rte_flow_item_tcp tcp_mask;
402         struct field_modify_info *field;
403
404         if (!attr->valid)
405                 flow_dv_attr_init(items, attr);
406         if (attr->udp) {
407                 memset(&udp, 0, sizeof(udp));
408                 memset(&udp_mask, 0, sizeof(udp_mask));
409                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
410                         udp.hdr.src_port = conf->port;
411                         udp_mask.hdr.src_port =
412                                         rte_flow_item_udp_mask.hdr.src_port;
413                 } else {
414                         udp.hdr.dst_port = conf->port;
415                         udp_mask.hdr.dst_port =
416                                         rte_flow_item_udp_mask.hdr.dst_port;
417                 }
418                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
419                 item.spec = &udp;
420                 item.mask = &udp_mask;
421                 field = modify_udp;
422         }
423         if (attr->tcp) {
424                 memset(&tcp, 0, sizeof(tcp));
425                 memset(&tcp_mask, 0, sizeof(tcp_mask));
426                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
427                         tcp.hdr.src_port = conf->port;
428                         tcp_mask.hdr.src_port =
429                                         rte_flow_item_tcp_mask.hdr.src_port;
430                 } else {
431                         tcp.hdr.dst_port = conf->port;
432                         tcp_mask.hdr.dst_port =
433                                         rte_flow_item_tcp_mask.hdr.dst_port;
434                 }
435                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
436                 item.spec = &tcp;
437                 item.mask = &tcp_mask;
438                 field = modify_tcp;
439         }
440         return flow_dv_convert_modify_action(&item, field, resource,
441                                              MLX5_MODIFICATION_TYPE_SET, error);
442 }
443
444 /**
445  * Convert modify-header set TTL action to DV specification.
446  *
447  * @param[in,out] resource
448  *   Pointer to the modify-header resource.
449  * @param[in] action
450  *   Pointer to action specification.
451  * @param[in] items
452  *   Pointer to rte_flow_item objects list.
453  * @param[in] attr
454  *   Pointer to flow attributes structure.
455  * @param[out] error
456  *   Pointer to the error structure.
457  *
458  * @return
459  *   0 on success, a negative errno value otherwise and rte_errno is set.
460  */
461 static int
462 flow_dv_convert_action_modify_ttl
463                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
464                          const struct rte_flow_action *action,
465                          const struct rte_flow_item *items,
466                          union flow_dv_attr *attr,
467                          struct rte_flow_error *error)
468 {
469         const struct rte_flow_action_set_ttl *conf =
470                 (const struct rte_flow_action_set_ttl *)(action->conf);
471         struct rte_flow_item item;
472         struct rte_flow_item_ipv4 ipv4;
473         struct rte_flow_item_ipv4 ipv4_mask;
474         struct rte_flow_item_ipv6 ipv6;
475         struct rte_flow_item_ipv6 ipv6_mask;
476         struct field_modify_info *field;
477
478         if (!attr->valid)
479                 flow_dv_attr_init(items, attr);
480         if (attr->ipv4) {
481                 memset(&ipv4, 0, sizeof(ipv4));
482                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
483                 ipv4.hdr.time_to_live = conf->ttl_value;
484                 ipv4_mask.hdr.time_to_live = 0xFF;
485                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
486                 item.spec = &ipv4;
487                 item.mask = &ipv4_mask;
488                 field = modify_ipv4;
489         }
490         if (attr->ipv6) {
491                 memset(&ipv6, 0, sizeof(ipv6));
492                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
493                 ipv6.hdr.hop_limits = conf->ttl_value;
494                 ipv6_mask.hdr.hop_limits = 0xFF;
495                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
496                 item.spec = &ipv6;
497                 item.mask = &ipv6_mask;
498                 field = modify_ipv6;
499         }
500         return flow_dv_convert_modify_action(&item, field, resource,
501                                              MLX5_MODIFICATION_TYPE_SET, error);
502 }
503
504 /**
505  * Convert modify-header decrement TTL action to DV specification.
506  *
507  * @param[in,out] resource
508  *   Pointer to the modify-header resource.
509  * @param[in] action
510  *   Pointer to action specification.
511  * @param[in] items
512  *   Pointer to rte_flow_item objects list.
513  * @param[in] attr
514  *   Pointer to flow attributes structure.
515  * @param[out] error
516  *   Pointer to the error structure.
517  *
518  * @return
519  *   0 on success, a negative errno value otherwise and rte_errno is set.
520  */
521 static int
522 flow_dv_convert_action_modify_dec_ttl
523                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
524                          const struct rte_flow_item *items,
525                          union flow_dv_attr *attr,
526                          struct rte_flow_error *error)
527 {
528         struct rte_flow_item item;
529         struct rte_flow_item_ipv4 ipv4;
530         struct rte_flow_item_ipv4 ipv4_mask;
531         struct rte_flow_item_ipv6 ipv6;
532         struct rte_flow_item_ipv6 ipv6_mask;
533         struct field_modify_info *field;
534
535         if (!attr->valid)
536                 flow_dv_attr_init(items, attr);
537         if (attr->ipv4) {
538                 memset(&ipv4, 0, sizeof(ipv4));
539                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
540                 ipv4.hdr.time_to_live = 0xFF;
541                 ipv4_mask.hdr.time_to_live = 0xFF;
542                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
543                 item.spec = &ipv4;
544                 item.mask = &ipv4_mask;
545                 field = modify_ipv4;
546         }
547         if (attr->ipv6) {
548                 memset(&ipv6, 0, sizeof(ipv6));
549                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
550                 ipv6.hdr.hop_limits = 0xFF;
551                 ipv6_mask.hdr.hop_limits = 0xFF;
552                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
553                 item.spec = &ipv6;
554                 item.mask = &ipv6_mask;
555                 field = modify_ipv6;
556         }
557         return flow_dv_convert_modify_action(&item, field, resource,
558                                              MLX5_MODIFICATION_TYPE_ADD, error);
559 }
560
561 /**
562  * Validate META item.
563  *
564  * @param[in] dev
565  *   Pointer to the rte_eth_dev structure.
566  * @param[in] item
567  *   Item specification.
568  * @param[in] attr
569  *   Attributes of flow that includes this item.
570  * @param[out] error
571  *   Pointer to error structure.
572  *
573  * @return
574  *   0 on success, a negative errno value otherwise and rte_errno is set.
575  */
576 static int
577 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
578                            const struct rte_flow_item *item,
579                            const struct rte_flow_attr *attr,
580                            struct rte_flow_error *error)
581 {
582         const struct rte_flow_item_meta *spec = item->spec;
583         const struct rte_flow_item_meta *mask = item->mask;
584         const struct rte_flow_item_meta nic_mask = {
585                 .data = RTE_BE32(UINT32_MAX)
586         };
587         int ret;
588         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
589
590         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
591                 return rte_flow_error_set(error, EPERM,
592                                           RTE_FLOW_ERROR_TYPE_ITEM,
593                                           NULL,
594                                           "match on metadata offload "
595                                           "configuration is off for this port");
596         if (!spec)
597                 return rte_flow_error_set(error, EINVAL,
598                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
599                                           item->spec,
600                                           "data cannot be empty");
601         if (!spec->data)
602                 return rte_flow_error_set(error, EINVAL,
603                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
604                                           NULL,
605                                           "data cannot be zero");
606         if (!mask)
607                 mask = &rte_flow_item_meta_mask;
608         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
609                                         (const uint8_t *)&nic_mask,
610                                         sizeof(struct rte_flow_item_meta),
611                                         error);
612         if (ret < 0)
613                 return ret;
614         if (attr->ingress)
615                 return rte_flow_error_set(error, ENOTSUP,
616                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
617                                           NULL,
618                                           "pattern not supported for ingress");
619         return 0;
620 }
621
622 /**
623  * Validate vport item.
624  *
625  * @param[in] dev
626  *   Pointer to the rte_eth_dev structure.
627  * @param[in] item
628  *   Item specification.
629  * @param[in] attr
630  *   Attributes of flow that includes this item.
631  * @param[in] item_flags
632  *   Bit-fields that holds the items detected until now.
633  * @param[out] error
634  *   Pointer to error structure.
635  *
636  * @return
637  *   0 on success, a negative errno value otherwise and rte_errno is set.
638  */
639 static int
640 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
641                               const struct rte_flow_item *item,
642                               const struct rte_flow_attr *attr,
643                               uint64_t item_flags,
644                               struct rte_flow_error *error)
645 {
646         const struct rte_flow_item_port_id *spec = item->spec;
647         const struct rte_flow_item_port_id *mask = item->mask;
648         const struct rte_flow_item_port_id switch_mask = {
649                         .id = 0xffffffff,
650         };
651         uint16_t esw_domain_id;
652         uint16_t item_port_esw_domain_id;
653         int ret;
654
655         if (!attr->transfer)
656                 return rte_flow_error_set(error, EINVAL,
657                                           RTE_FLOW_ERROR_TYPE_ITEM,
658                                           NULL,
659                                           "match on port id is valid only"
660                                           " when transfer flag is enabled");
661         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
662                 return rte_flow_error_set(error, ENOTSUP,
663                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
664                                           "multiple source ports are not"
665                                           " supported");
666         if (!mask)
667                 mask = &switch_mask;
668         if (mask->id != 0xffffffff)
669                 return rte_flow_error_set(error, ENOTSUP,
670                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
671                                            mask,
672                                            "no support for partial mask on"
673                                            " \"id\" field");
674         ret = mlx5_flow_item_acceptable
675                                 (item, (const uint8_t *)mask,
676                                  (const uint8_t *)&rte_flow_item_port_id_mask,
677                                  sizeof(struct rte_flow_item_port_id),
678                                  error);
679         if (ret)
680                 return ret;
681         if (!spec)
682                 return 0;
683         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
684                                         NULL);
685         if (ret)
686                 return rte_flow_error_set(error, -ret,
687                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
688                                           "failed to obtain E-Switch info for"
689                                           " port");
690         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
691                                         &esw_domain_id, NULL);
692         if (ret < 0)
693                 return rte_flow_error_set(error, -ret,
694                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
695                                           NULL,
696                                           "failed to obtain E-Switch info");
697         if (item_port_esw_domain_id != esw_domain_id)
698                 return rte_flow_error_set(error, -ret,
699                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
700                                           "cannot match on a port from a"
701                                           " different E-Switch");
702         return 0;
703 }
704
705 /**
706  * Validate count action.
707  *
708  * @param[in] dev
709  *   device otr.
710  * @param[out] error
711  *   Pointer to error structure.
712  *
713  * @return
714  *   0 on success, a negative errno value otherwise and rte_errno is set.
715  */
716 static int
717 flow_dv_validate_action_count(struct rte_eth_dev *dev,
718                               struct rte_flow_error *error)
719 {
720         struct mlx5_priv *priv = dev->data->dev_private;
721
722         if (!priv->config.devx)
723                 goto notsup_err;
724 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
725         return 0;
726 #endif
727 notsup_err:
728         return rte_flow_error_set
729                       (error, ENOTSUP,
730                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
731                        NULL,
732                        "count action not supported");
733 }
734
735 /**
736  * Validate the L2 encap action.
737  *
738  * @param[in] action_flags
739  *   Holds the actions detected until now.
740  * @param[in] action
741  *   Pointer to the encap action.
742  * @param[in] attr
743  *   Pointer to flow attributes
744  * @param[out] error
745  *   Pointer to error structure.
746  *
747  * @return
748  *   0 on success, a negative errno value otherwise and rte_errno is set.
749  */
750 static int
751 flow_dv_validate_action_l2_encap(uint64_t action_flags,
752                                  const struct rte_flow_action *action,
753                                  const struct rte_flow_attr *attr,
754                                  struct rte_flow_error *error)
755 {
756         if (!(action->conf))
757                 return rte_flow_error_set(error, EINVAL,
758                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
759                                           "configuration cannot be null");
760         if (action_flags & MLX5_FLOW_ACTION_DROP)
761                 return rte_flow_error_set(error, EINVAL,
762                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
763                                           "can't drop and encap in same flow");
764         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
765                 return rte_flow_error_set(error, EINVAL,
766                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
767                                           "can only have a single encap or"
768                                           " decap action in a flow");
769         if (!attr->transfer && attr->ingress)
770                 return rte_flow_error_set(error, ENOTSUP,
771                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
772                                           NULL,
773                                           "encap action not supported for "
774                                           "ingress");
775         return 0;
776 }
777
778 /**
779  * Validate the L2 decap action.
780  *
781  * @param[in] action_flags
782  *   Holds the actions detected until now.
783  * @param[in] attr
784  *   Pointer to flow attributes
785  * @param[out] error
786  *   Pointer to error structure.
787  *
788  * @return
789  *   0 on success, a negative errno value otherwise and rte_errno is set.
790  */
791 static int
792 flow_dv_validate_action_l2_decap(uint64_t action_flags,
793                                  const struct rte_flow_attr *attr,
794                                  struct rte_flow_error *error)
795 {
796         if (action_flags & MLX5_FLOW_ACTION_DROP)
797                 return rte_flow_error_set(error, EINVAL,
798                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
799                                           "can't drop and decap in same flow");
800         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
801                 return rte_flow_error_set(error, EINVAL,
802                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
803                                           "can only have a single encap or"
804                                           " decap action in a flow");
805         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
806                 return rte_flow_error_set(error, EINVAL,
807                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
808                                           "can't have decap action after"
809                                           " modify action");
810         if (attr->egress)
811                 return rte_flow_error_set(error, ENOTSUP,
812                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
813                                           NULL,
814                                           "decap action not supported for "
815                                           "egress");
816         return 0;
817 }
818
819 /**
820  * Validate the raw encap action.
821  *
822  * @param[in] action_flags
823  *   Holds the actions detected until now.
824  * @param[in] action
825  *   Pointer to the encap action.
826  * @param[in] attr
827  *   Pointer to flow attributes
828  * @param[out] error
829  *   Pointer to error structure.
830  *
831  * @return
832  *   0 on success, a negative errno value otherwise and rte_errno is set.
833  */
834 static int
835 flow_dv_validate_action_raw_encap(uint64_t action_flags,
836                                   const struct rte_flow_action *action,
837                                   const struct rte_flow_attr *attr,
838                                   struct rte_flow_error *error)
839 {
840         if (!(action->conf))
841                 return rte_flow_error_set(error, EINVAL,
842                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
843                                           "configuration cannot be null");
844         if (action_flags & MLX5_FLOW_ACTION_DROP)
845                 return rte_flow_error_set(error, EINVAL,
846                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
847                                           "can't drop and encap in same flow");
848         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
849                 return rte_flow_error_set(error, EINVAL,
850                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
851                                           "can only have a single encap"
852                                           " action in a flow");
853         /* encap without preceding decap is not supported for ingress */
854         if (!attr->transfer &&  attr->ingress &&
855             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
856                 return rte_flow_error_set(error, ENOTSUP,
857                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
858                                           NULL,
859                                           "encap action not supported for "
860                                           "ingress");
861         return 0;
862 }
863
864 /**
865  * Validate the raw decap action.
866  *
867  * @param[in] action_flags
868  *   Holds the actions detected until now.
869  * @param[in] action
870  *   Pointer to the encap action.
871  * @param[in] attr
872  *   Pointer to flow attributes
873  * @param[out] error
874  *   Pointer to error structure.
875  *
876  * @return
877  *   0 on success, a negative errno value otherwise and rte_errno is set.
878  */
879 static int
880 flow_dv_validate_action_raw_decap(uint64_t action_flags,
881                                   const struct rte_flow_action *action,
882                                   const struct rte_flow_attr *attr,
883                                   struct rte_flow_error *error)
884 {
885         if (action_flags & MLX5_FLOW_ACTION_DROP)
886                 return rte_flow_error_set(error, EINVAL,
887                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
888                                           "can't drop and decap in same flow");
889         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
890                 return rte_flow_error_set(error, EINVAL,
891                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
892                                           "can't have encap action before"
893                                           " decap action");
894         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
895                 return rte_flow_error_set(error, EINVAL,
896                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
897                                           "can only have a single decap"
898                                           " action in a flow");
899         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
900                 return rte_flow_error_set(error, EINVAL,
901                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
902                                           "can't have decap action after"
903                                           " modify action");
904         /* decap action is valid on egress only if it is followed by encap */
905         if (attr->egress) {
906                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
907                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
908                        action++) {
909                 }
910                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
911                         return rte_flow_error_set
912                                         (error, ENOTSUP,
913                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
914                                          NULL, "decap action not supported"
915                                          " for egress");
916         }
917         return 0;
918 }
919
/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * Resources are shared per-port: a reference-counted cache on the shared
 * IB context avoids creating duplicate reformat actions for identical
 * encap/decap configurations.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;

	/*
	 * Group 0 flows live in the root table; flag value 1 presumably
	 * maps to MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL (see fallback #define
	 * at the top of this file) — TODO confirm against mlx5dv headers.
	 */
	resource->flags = flow->group ? 0 : 1;
	/*
	 * Select the namespace by table type. NOTE(review): keep this as an
	 * if/else chain — when HAVE_MLX5DV_DR_ESWITCH is not defined,
	 * MLX5DV_FLOW_TABLE_TYPE_FDB is defined to 0 and may alias another
	 * table type, so the check order matters.
	 */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_ns;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		ns = sh->rx_ns;
	else
		ns = sh->tx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		/* A match requires identical type, table, flags and data. */
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Pass a NULL buffer for zero-sized (plain decap) reformats. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1001
1002 /**
1003  * Find existing table jump resource or create and register a new one.
1004  *
1005  * @param dev[in, out]
1006  *   Pointer to rte_eth_dev structure.
1007  * @param[in, out] resource
1008  *   Pointer to jump table resource.
1009  * @parm[in, out] dev_flow
1010  *   Pointer to the dev_flow.
1011  * @param[out] error
1012  *   pointer to error structure.
1013  *
1014  * @return
1015  *   0 on success otherwise -errno and errno is set.
1016  */
1017 static int
1018 flow_dv_jump_tbl_resource_register
1019                         (struct rte_eth_dev *dev,
1020                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1021                          struct mlx5_flow *dev_flow,
1022                          struct rte_flow_error *error)
1023 {
1024         struct mlx5_priv *priv = dev->data->dev_private;
1025         struct mlx5_ibv_shared *sh = priv->sh;
1026         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1027
1028         /* Lookup a matching resource from cache. */
1029         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1030                 if (resource->tbl == cache_resource->tbl) {
1031                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1032                                 (void *)cache_resource,
1033                                 rte_atomic32_read(&cache_resource->refcnt));
1034                         rte_atomic32_inc(&cache_resource->refcnt);
1035                         dev_flow->dv.jump = cache_resource;
1036                         return 0;
1037                 }
1038         }
1039         /* Register new jump table resource. */
1040         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1041         if (!cache_resource)
1042                 return rte_flow_error_set(error, ENOMEM,
1043                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1044                                           "cannot allocate resource memory");
1045         *cache_resource = *resource;
1046         cache_resource->action =
1047                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1048                 (resource->tbl->obj);
1049         if (!cache_resource->action) {
1050                 rte_free(cache_resource);
1051                 return rte_flow_error_set(error, ENOMEM,
1052                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1053                                           NULL, "cannot create action");
1054         }
1055         rte_atomic32_init(&cache_resource->refcnt);
1056         rte_atomic32_inc(&cache_resource->refcnt);
1057         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1058         dev_flow->dv.jump = cache_resource;
1059         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1060                 (void *)cache_resource,
1061                 rte_atomic32_read(&cache_resource->refcnt));
1062         return 0;
1063 }
1064
1065 /**
1066  * Find existing table port ID resource or create and register a new one.
1067  *
1068  * @param dev[in, out]
1069  *   Pointer to rte_eth_dev structure.
1070  * @param[in, out] resource
1071  *   Pointer to port ID action resource.
1072  * @parm[in, out] dev_flow
1073  *   Pointer to the dev_flow.
1074  * @param[out] error
1075  *   pointer to error structure.
1076  *
1077  * @return
1078  *   0 on success otherwise -errno and errno is set.
1079  */
1080 static int
1081 flow_dv_port_id_action_resource_register
1082                         (struct rte_eth_dev *dev,
1083                          struct mlx5_flow_dv_port_id_action_resource *resource,
1084                          struct mlx5_flow *dev_flow,
1085                          struct rte_flow_error *error)
1086 {
1087         struct mlx5_priv *priv = dev->data->dev_private;
1088         struct mlx5_ibv_shared *sh = priv->sh;
1089         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1090
1091         /* Lookup a matching resource from cache. */
1092         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1093                 if (resource->port_id == cache_resource->port_id) {
1094                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1095                                 "refcnt %d++",
1096                                 (void *)cache_resource,
1097                                 rte_atomic32_read(&cache_resource->refcnt));
1098                         rte_atomic32_inc(&cache_resource->refcnt);
1099                         dev_flow->dv.port_id_action = cache_resource;
1100                         return 0;
1101                 }
1102         }
1103         /* Register new port id action resource. */
1104         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1105         if (!cache_resource)
1106                 return rte_flow_error_set(error, ENOMEM,
1107                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1108                                           "cannot allocate resource memory");
1109         *cache_resource = *resource;
1110         cache_resource->action =
1111                 mlx5_glue->dr_create_flow_action_dest_vport(priv->sh->fdb_ns,
1112                                                             resource->port_id);
1113         if (!cache_resource->action) {
1114                 rte_free(cache_resource);
1115                 return rte_flow_error_set(error, ENOMEM,
1116                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1117                                           NULL, "cannot create action");
1118         }
1119         rte_atomic32_init(&cache_resource->refcnt);
1120         rte_atomic32_inc(&cache_resource->refcnt);
1121         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1122         dev_flow->dv.port_id_action = cache_resource;
1123         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1124                 (void *)cache_resource,
1125                 rte_atomic32_read(&cache_resource->refcnt));
1126         return 0;
1127 }
1128
1129 /**
1130  * Get the size of specific rte_flow_item_type
1131  *
1132  * @param[in] item_type
1133  *   Tested rte_flow_item_type.
1134  *
1135  * @return
1136  *   sizeof struct item_type, 0 if void or irrelevant.
1137  */
1138 static size_t
1139 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1140 {
1141         size_t retval;
1142
1143         switch (item_type) {
1144         case RTE_FLOW_ITEM_TYPE_ETH:
1145                 retval = sizeof(struct rte_flow_item_eth);
1146                 break;
1147         case RTE_FLOW_ITEM_TYPE_VLAN:
1148                 retval = sizeof(struct rte_flow_item_vlan);
1149                 break;
1150         case RTE_FLOW_ITEM_TYPE_IPV4:
1151                 retval = sizeof(struct rte_flow_item_ipv4);
1152                 break;
1153         case RTE_FLOW_ITEM_TYPE_IPV6:
1154                 retval = sizeof(struct rte_flow_item_ipv6);
1155                 break;
1156         case RTE_FLOW_ITEM_TYPE_UDP:
1157                 retval = sizeof(struct rte_flow_item_udp);
1158                 break;
1159         case RTE_FLOW_ITEM_TYPE_TCP:
1160                 retval = sizeof(struct rte_flow_item_tcp);
1161                 break;
1162         case RTE_FLOW_ITEM_TYPE_VXLAN:
1163                 retval = sizeof(struct rte_flow_item_vxlan);
1164                 break;
1165         case RTE_FLOW_ITEM_TYPE_GRE:
1166                 retval = sizeof(struct rte_flow_item_gre);
1167                 break;
1168         case RTE_FLOW_ITEM_TYPE_NVGRE:
1169                 retval = sizeof(struct rte_flow_item_nvgre);
1170                 break;
1171         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1172                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1173                 break;
1174         case RTE_FLOW_ITEM_TYPE_MPLS:
1175                 retval = sizeof(struct rte_flow_item_mpls);
1176                 break;
1177         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1178         default:
1179                 retval = 0;
1180                 break;
1181         }
1182         return retval;
1183 }
1184
/*
 * Default header field values filled in by flow_dv_convert_encap_data()
 * when the user-provided encap items leave them at zero.
 */
#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04
1192
/**
 * Convert the encap action data from list of rte_flow_item to raw buffer
 *
 * Walks the item list in order, copying each item's spec into @p buf and
 * fixing up zeroed header fields (EtherType, IP next protocol, UDP
 * destination port, default TTL/flags) so the resulting bytes form a
 * coherent packet header chain.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Pointers into buf for the most recent header of each layer. */
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/*
		 * NOTE(review): items->spec is assumed non-NULL when len > 0
		 * — presumably guaranteed by earlier validation; confirm.
		 */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			/* VLAN must follow an Ethernet header. */
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Fill in the EtherType of the innermost L2 header. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* Default to the well-known VXLAN UDP port. */
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE cannot infer a next protocol; user must set it. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			/* GRE cannot infer a next protocol; user must set it. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
1362
1363 /**
1364  * Convert L2 encap action to DV specification.
1365  *
1366  * @param[in] dev
1367  *   Pointer to rte_eth_dev structure.
1368  * @param[in] action
1369  *   Pointer to action structure.
1370  * @param[in, out] dev_flow
1371  *   Pointer to the mlx5_flow.
1372  * @param[in] transfer
1373  *   Mark if the flow is E-Switch flow.
1374  * @param[out] error
1375  *   Pointer to the error structure.
1376  *
1377  * @return
1378  *   0 on success, a negative errno value otherwise and rte_errno is set.
1379  */
1380 static int
1381 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1382                                const struct rte_flow_action *action,
1383                                struct mlx5_flow *dev_flow,
1384                                uint8_t transfer,
1385                                struct rte_flow_error *error)
1386 {
1387         const struct rte_flow_item *encap_data;
1388         const struct rte_flow_action_raw_encap *raw_encap_data;
1389         struct mlx5_flow_dv_encap_decap_resource res = {
1390                 .reformat_type =
1391                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1392                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1393                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1394         };
1395
1396         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1397                 raw_encap_data =
1398                         (const struct rte_flow_action_raw_encap *)action->conf;
1399                 res.size = raw_encap_data->size;
1400                 memcpy(res.buf, raw_encap_data->data, res.size);
1401         } else {
1402                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1403                         encap_data =
1404                                 ((const struct rte_flow_action_vxlan_encap *)
1405                                                 action->conf)->definition;
1406                 else
1407                         encap_data =
1408                                 ((const struct rte_flow_action_nvgre_encap *)
1409                                                 action->conf)->definition;
1410                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1411                                                &res.size, error))
1412                         return -rte_errno;
1413         }
1414         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1415                 return rte_flow_error_set(error, EINVAL,
1416                                           RTE_FLOW_ERROR_TYPE_ACTION,
1417                                           NULL, "can't create L2 encap action");
1418         return 0;
1419 }
1420
1421 /**
1422  * Convert L2 decap action to DV specification.
1423  *
1424  * @param[in] dev
1425  *   Pointer to rte_eth_dev structure.
1426  * @param[in, out] dev_flow
1427  *   Pointer to the mlx5_flow.
1428  * @param[in] transfer
1429  *   Mark if the flow is E-Switch flow.
1430  * @param[out] error
1431  *   Pointer to the error structure.
1432  *
1433  * @return
1434  *   0 on success, a negative errno value otherwise and rte_errno is set.
1435  */
1436 static int
1437 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1438                                struct mlx5_flow *dev_flow,
1439                                uint8_t transfer,
1440                                struct rte_flow_error *error)
1441 {
1442         struct mlx5_flow_dv_encap_decap_resource res = {
1443                 .size = 0,
1444                 .reformat_type =
1445                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1446                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1447                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1448         };
1449
1450         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1451                 return rte_flow_error_set(error, EINVAL,
1452                                           RTE_FLOW_ERROR_TYPE_ACTION,
1453                                           NULL, "can't create L2 decap action");
1454         return 0;
1455 }
1456
1457 /**
1458  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1459  *
1460  * @param[in] dev
1461  *   Pointer to rte_eth_dev structure.
1462  * @param[in] action
1463  *   Pointer to action structure.
1464  * @param[in, out] dev_flow
1465  *   Pointer to the mlx5_flow.
1466  * @param[in] attr
1467  *   Pointer to the flow attributes.
1468  * @param[out] error
1469  *   Pointer to the error structure.
1470  *
1471  * @return
1472  *   0 on success, a negative errno value otherwise and rte_errno is set.
1473  */
1474 static int
1475 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1476                                 const struct rte_flow_action *action,
1477                                 struct mlx5_flow *dev_flow,
1478                                 const struct rte_flow_attr *attr,
1479                                 struct rte_flow_error *error)
1480 {
1481         const struct rte_flow_action_raw_encap *encap_data;
1482         struct mlx5_flow_dv_encap_decap_resource res;
1483
1484         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1485         res.size = encap_data->size;
1486         memcpy(res.buf, encap_data->data, res.size);
1487         res.reformat_type = attr->egress ?
1488                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1489                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1490         if (attr->transfer)
1491                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1492         else
1493                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1494                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1495         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1496                 return rte_flow_error_set(error, EINVAL,
1497                                           RTE_FLOW_ERROR_TYPE_ACTION,
1498                                           NULL, "can't create encap action");
1499         return 0;
1500 }
1501
1502 /**
1503  * Validate the modify-header actions.
1504  *
1505  * @param[in] action_flags
1506  *   Holds the actions detected until now.
1507  * @param[in] action
1508  *   Pointer to the modify action.
1509  * @param[out] error
1510  *   Pointer to error structure.
1511  *
1512  * @return
1513  *   0 on success, a negative errno value otherwise and rte_errno is set.
1514  */
1515 static int
1516 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1517                                    const struct rte_flow_action *action,
1518                                    struct rte_flow_error *error)
1519 {
1520         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1521                 return rte_flow_error_set(error, EINVAL,
1522                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1523                                           NULL, "action configuration not set");
1524         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1525                 return rte_flow_error_set(error, EINVAL,
1526                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1527                                           "can't have encap action before"
1528                                           " modify action");
1529         return 0;
1530 }
1531
1532 /**
1533  * Validate the modify-header MAC address actions.
1534  *
1535  * @param[in] action_flags
1536  *   Holds the actions detected until now.
1537  * @param[in] action
1538  *   Pointer to the modify action.
1539  * @param[in] item_flags
1540  *   Holds the items detected.
1541  * @param[out] error
1542  *   Pointer to error structure.
1543  *
1544  * @return
1545  *   0 on success, a negative errno value otherwise and rte_errno is set.
1546  */
1547 static int
1548 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1549                                    const struct rte_flow_action *action,
1550                                    const uint64_t item_flags,
1551                                    struct rte_flow_error *error)
1552 {
1553         int ret = 0;
1554
1555         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1556         if (!ret) {
1557                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1558                         return rte_flow_error_set(error, EINVAL,
1559                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1560                                                   NULL,
1561                                                   "no L2 item in pattern");
1562         }
1563         return ret;
1564 }
1565
1566 /**
1567  * Validate the modify-header IPv4 address actions.
1568  *
1569  * @param[in] action_flags
1570  *   Holds the actions detected until now.
1571  * @param[in] action
1572  *   Pointer to the modify action.
1573  * @param[in] item_flags
1574  *   Holds the items detected.
1575  * @param[out] error
1576  *   Pointer to error structure.
1577  *
1578  * @return
1579  *   0 on success, a negative errno value otherwise and rte_errno is set.
1580  */
1581 static int
1582 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1583                                     const struct rte_flow_action *action,
1584                                     const uint64_t item_flags,
1585                                     struct rte_flow_error *error)
1586 {
1587         int ret = 0;
1588
1589         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1590         if (!ret) {
1591                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1592                         return rte_flow_error_set(error, EINVAL,
1593                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1594                                                   NULL,
1595                                                   "no ipv4 item in pattern");
1596         }
1597         return ret;
1598 }
1599
1600 /**
1601  * Validate the modify-header IPv6 address actions.
1602  *
1603  * @param[in] action_flags
1604  *   Holds the actions detected until now.
1605  * @param[in] action
1606  *   Pointer to the modify action.
1607  * @param[in] item_flags
1608  *   Holds the items detected.
1609  * @param[out] error
1610  *   Pointer to error structure.
1611  *
1612  * @return
1613  *   0 on success, a negative errno value otherwise and rte_errno is set.
1614  */
1615 static int
1616 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1617                                     const struct rte_flow_action *action,
1618                                     const uint64_t item_flags,
1619                                     struct rte_flow_error *error)
1620 {
1621         int ret = 0;
1622
1623         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1624         if (!ret) {
1625                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1626                         return rte_flow_error_set(error, EINVAL,
1627                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1628                                                   NULL,
1629                                                   "no ipv6 item in pattern");
1630         }
1631         return ret;
1632 }
1633
1634 /**
1635  * Validate the modify-header TP actions.
1636  *
1637  * @param[in] action_flags
1638  *   Holds the actions detected until now.
1639  * @param[in] action
1640  *   Pointer to the modify action.
1641  * @param[in] item_flags
1642  *   Holds the items detected.
1643  * @param[out] error
1644  *   Pointer to error structure.
1645  *
1646  * @return
1647  *   0 on success, a negative errno value otherwise and rte_errno is set.
1648  */
1649 static int
1650 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1651                                   const struct rte_flow_action *action,
1652                                   const uint64_t item_flags,
1653                                   struct rte_flow_error *error)
1654 {
1655         int ret = 0;
1656
1657         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1658         if (!ret) {
1659                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1660                         return rte_flow_error_set(error, EINVAL,
1661                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1662                                                   NULL, "no transport layer "
1663                                                   "in pattern");
1664         }
1665         return ret;
1666 }
1667
1668 /**
1669  * Validate the modify-header TTL actions.
1670  *
1671  * @param[in] action_flags
1672  *   Holds the actions detected until now.
1673  * @param[in] action
1674  *   Pointer to the modify action.
1675  * @param[in] item_flags
1676  *   Holds the items detected.
1677  * @param[out] error
1678  *   Pointer to error structure.
1679  *
1680  * @return
1681  *   0 on success, a negative errno value otherwise and rte_errno is set.
1682  */
1683 static int
1684 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1685                                    const struct rte_flow_action *action,
1686                                    const uint64_t item_flags,
1687                                    struct rte_flow_error *error)
1688 {
1689         int ret = 0;
1690
1691         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1692         if (!ret) {
1693                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1694                         return rte_flow_error_set(error, EINVAL,
1695                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1696                                                   NULL,
1697                                                   "no IP protocol in pattern");
1698         }
1699         return ret;
1700 }
1701
1702 /**
1703  * Validate jump action.
1704  *
1705  * @param[in] action
1706  *   Pointer to the modify action.
1707  * @param[in] group
1708  *   The group of the current flow.
1709  * @param[out] error
1710  *   Pointer to error structure.
1711  *
1712  * @return
1713  *   0 on success, a negative errno value otherwise and rte_errno is set.
1714  */
1715 static int
1716 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1717                              uint32_t group,
1718                              struct rte_flow_error *error)
1719 {
1720         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1721                 return rte_flow_error_set(error, EINVAL,
1722                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1723                                           NULL, "action configuration not set");
1724         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1725                 return rte_flow_error_set(error, EINVAL,
1726                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1727                                           "target group must be higher then"
1728                                           " the current flow group");
1729         return 0;
1730 }
1731
1732 /*
1733  * Validate the port_id action.
1734  *
1735  * @param[in] dev
1736  *   Pointer to rte_eth_dev structure.
1737  * @param[in] action_flags
1738  *   Bit-fields that holds the actions detected until now.
1739  * @param[in] action
1740  *   Port_id RTE action structure.
1741  * @param[in] attr
1742  *   Attributes of flow that includes this action.
1743  * @param[out] error
1744  *   Pointer to error structure.
1745  *
1746  * @return
1747  *   0 on success, a negative errno value otherwise and rte_errno is set.
1748  */
1749 static int
1750 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1751                                 uint64_t action_flags,
1752                                 const struct rte_flow_action *action,
1753                                 const struct rte_flow_attr *attr,
1754                                 struct rte_flow_error *error)
1755 {
1756         const struct rte_flow_action_port_id *port_id;
1757         uint16_t port;
1758         uint16_t esw_domain_id;
1759         uint16_t act_port_domain_id;
1760         int ret;
1761
1762         if (!attr->transfer)
1763                 return rte_flow_error_set(error, ENOTSUP,
1764                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1765                                           NULL,
1766                                           "port id action is valid in transfer"
1767                                           " mode only");
1768         if (!action || !action->conf)
1769                 return rte_flow_error_set(error, ENOTSUP,
1770                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1771                                           NULL,
1772                                           "port id action parameters must be"
1773                                           " specified");
1774         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
1775                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
1776                 return rte_flow_error_set(error, EINVAL,
1777                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1778                                           "can have only one fate actions in"
1779                                           " a flow");
1780         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
1781                                         &esw_domain_id, NULL);
1782         if (ret < 0)
1783                 return rte_flow_error_set(error, -ret,
1784                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1785                                           NULL,
1786                                           "failed to obtain E-Switch info");
1787         port_id = action->conf;
1788         port = port_id->original ? dev->data->port_id : port_id->id;
1789         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
1790         if (ret)
1791                 return rte_flow_error_set
1792                                 (error, -ret,
1793                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
1794                                  "failed to obtain E-Switch port id for port");
1795         if (act_port_domain_id != esw_domain_id)
1796                 return rte_flow_error_set
1797                                 (error, -ret,
1798                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1799                                  "port does not belong to"
1800                                  " E-Switch being configured");
1801         return 0;
1802 }
1803
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * Matching is done on table type, number of actions, root/non-root flags
 * and the raw action array; a cache hit only bumps the reference count.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource. The flags field is overwritten here
 *   from the flow's group before the cache lookup.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow. On success dev_flow->dv.modify_hdr points to
 *   the (shared) cached resource.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_ns *ns;

	/* Select the DR namespace matching the target table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_ns;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_ns;
	else
		ns = sh->rx_ns;
	/* Group 0 flows live in the root table and need the root flag. */
	resource->flags =
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, cache_resource->flags,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1885
1886 /**
1887  * Get or create a flow counter.
1888  *
1889  * @param[in] dev
1890  *   Pointer to the Ethernet device structure.
1891  * @param[in] shared
1892  *   Indicate if this counter is shared with other flows.
1893  * @param[in] id
1894  *   Counter identifier.
1895  *
1896  * @return
1897  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
1898  */
1899 static struct mlx5_flow_counter *
1900 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1901 {
1902         struct mlx5_priv *priv = dev->data->dev_private;
1903         struct mlx5_flow_counter *cnt = NULL;
1904         struct mlx5_devx_counter_set *dcs = NULL;
1905         int ret;
1906
1907         if (!priv->config.devx) {
1908                 ret = -ENOTSUP;
1909                 goto error_exit;
1910         }
1911         if (shared) {
1912                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1913                         if (cnt->shared && cnt->id == id) {
1914                                 cnt->ref_cnt++;
1915                                 return cnt;
1916                         }
1917                 }
1918         }
1919         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1920         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1921         if (!dcs || !cnt) {
1922                 ret = -ENOMEM;
1923                 goto error_exit;
1924         }
1925         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1926         if (ret)
1927                 goto error_exit;
1928         struct mlx5_flow_counter tmpl = {
1929                 .shared = shared,
1930                 .ref_cnt = 1,
1931                 .id = id,
1932                 .dcs = dcs,
1933         };
1934         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1935         if (!tmpl.action) {
1936                 ret = errno;
1937                 goto error_exit;
1938         }
1939         *cnt = tmpl;
1940         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1941         return cnt;
1942 error_exit:
1943         rte_free(cnt);
1944         rte_free(dcs);
1945         rte_errno = -ret;
1946         return NULL;
1947 }
1948
1949 /**
1950  * Release a flow counter.
1951  *
1952  * @param[in] counter
1953  *   Pointer to the counter handler.
1954  */
1955 static void
1956 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1957 {
1958         int ret;
1959
1960         if (!counter)
1961                 return;
1962         if (--counter->ref_cnt == 0) {
1963                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1964                 if (ret)
1965                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1966                 LIST_REMOVE(counter, next);
1967                 rte_free(counter->dcs);
1968                 rte_free(counter);
1969         }
1970 }
1971
1972 /**
1973  * Verify the @p attributes will be correctly understood by the NIC and store
1974  * them in the @p flow if everything is correct.
1975  *
1976  * @param[in] dev
1977  *   Pointer to dev struct.
1978  * @param[in] attributes
1979  *   Pointer to flow attributes
1980  * @param[out] error
1981  *   Pointer to error structure.
1982  *
1983  * @return
1984  *   0 on success, a negative errno value otherwise and rte_errno is set.
1985  */
1986 static int
1987 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1988                             const struct rte_flow_attr *attributes,
1989                             struct rte_flow_error *error)
1990 {
1991         struct mlx5_priv *priv = dev->data->dev_private;
1992         uint32_t priority_max = priv->config.flow_prio - 1;
1993
1994 #ifndef HAVE_MLX5DV_DR
1995         if (attributes->group)
1996                 return rte_flow_error_set(error, ENOTSUP,
1997                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1998                                           NULL,
1999                                           "groups is not supported");
2000 #endif
2001         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2002             attributes->priority >= priority_max)
2003                 return rte_flow_error_set(error, ENOTSUP,
2004                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2005                                           NULL,
2006                                           "priority out of range");
2007         if (attributes->transfer) {
2008                 if (!priv->config.dv_esw_en)
2009                         return rte_flow_error_set
2010                                 (error, ENOTSUP,
2011                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2012                                  "E-Switch dr is not supported");
2013                 if (!(priv->representor || priv->master))
2014                         return rte_flow_error_set
2015                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2016                                  NULL, "E-Switch configurationd can only be"
2017                                  " done by a master or a representor device");
2018                 if (attributes->egress)
2019                         return rte_flow_error_set
2020                                 (error, ENOTSUP,
2021                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2022                                  "egress is not supported");
2023                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2024                         return rte_flow_error_set
2025                                 (error, EINVAL,
2026                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2027                                  NULL, "group must be smaller than "
2028                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2029         }
2030         if (!(attributes->egress ^ attributes->ingress))
2031                 return rte_flow_error_set(error, ENOTSUP,
2032                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2033                                           "must specify exactly one of "
2034                                           "ingress or egress");
2035         return 0;
2036 }
2037
2038 /**
2039  * Internal validation function. For validating both actions and items.
2040  *
2041  * @param[in] dev
2042  *   Pointer to the rte_eth_dev structure.
2043  * @param[in] attr
2044  *   Pointer to the flow attributes.
2045  * @param[in] items
2046  *   Pointer to the list of items.
2047  * @param[in] actions
2048  *   Pointer to the list of actions.
2049  * @param[out] error
2050  *   Pointer to the error structure.
2051  *
2052  * @return
2053  *   0 on success, a negative errno value otherwise and rte_errno is set.
2054  */
2055 static int
2056 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
2057                  const struct rte_flow_item items[],
2058                  const struct rte_flow_action actions[],
2059                  struct rte_flow_error *error)
2060 {
2061         int ret;
2062         uint64_t action_flags = 0;
2063         uint64_t item_flags = 0;
2064         uint64_t last_item = 0;
2065         uint8_t next_protocol = 0xff;
2066         int actions_n = 0;
2067
2068         if (items == NULL)
2069                 return -1;
2070         ret = flow_dv_validate_attributes(dev, attr, error);
2071         if (ret < 0)
2072                 return ret;
2073         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2074                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2075                 switch (items->type) {
2076                 case RTE_FLOW_ITEM_TYPE_VOID:
2077                         break;
2078                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
2079                         ret = flow_dv_validate_item_port_id
2080                                         (dev, items, attr, item_flags, error);
2081                         if (ret < 0)
2082                                 return ret;
2083                         last_item |= MLX5_FLOW_ITEM_PORT_ID;
2084                         break;
2085                 case RTE_FLOW_ITEM_TYPE_ETH:
2086                         ret = mlx5_flow_validate_item_eth(items, item_flags,
2087                                                           error);
2088                         if (ret < 0)
2089                                 return ret;
2090                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2091                                              MLX5_FLOW_LAYER_OUTER_L2;
2092                         break;
2093                 case RTE_FLOW_ITEM_TYPE_VLAN:
2094                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
2095                                                            error);
2096                         if (ret < 0)
2097                                 return ret;
2098                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
2099                                              MLX5_FLOW_LAYER_OUTER_VLAN;
2100                         break;
2101                 case RTE_FLOW_ITEM_TYPE_IPV4:
2102                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
2103                                                            NULL, error);
2104                         if (ret < 0)
2105                                 return ret;
2106                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2107                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2108                         if (items->mask != NULL &&
2109                             ((const struct rte_flow_item_ipv4 *)
2110                              items->mask)->hdr.next_proto_id) {
2111                                 next_protocol =
2112                                         ((const struct rte_flow_item_ipv4 *)
2113                                          (items->spec))->hdr.next_proto_id;
2114                                 next_protocol &=
2115                                         ((const struct rte_flow_item_ipv4 *)
2116                                          (items->mask))->hdr.next_proto_id;
2117                         } else {
2118                                 /* Reset for inner layer. */
2119                                 next_protocol = 0xff;
2120                         }
2121                         break;
2122                 case RTE_FLOW_ITEM_TYPE_IPV6:
2123                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
2124                                                            NULL, error);
2125                         if (ret < 0)
2126                                 return ret;
2127                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2128                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2129                         if (items->mask != NULL &&
2130                             ((const struct rte_flow_item_ipv6 *)
2131                              items->mask)->hdr.proto) {
2132                                 next_protocol =
2133                                         ((const struct rte_flow_item_ipv6 *)
2134                                          items->spec)->hdr.proto;
2135                                 next_protocol &=
2136                                         ((const struct rte_flow_item_ipv6 *)
2137                                          items->mask)->hdr.proto;
2138                         } else {
2139                                 /* Reset for inner layer. */
2140                                 next_protocol = 0xff;
2141                         }
2142                         break;
2143                 case RTE_FLOW_ITEM_TYPE_TCP:
2144                         ret = mlx5_flow_validate_item_tcp
2145                                                 (items, item_flags,
2146                                                  next_protocol,
2147                                                  &rte_flow_item_tcp_mask,
2148                                                  error);
2149                         if (ret < 0)
2150                                 return ret;
2151                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2152                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2153                         break;
2154                 case RTE_FLOW_ITEM_TYPE_UDP:
2155                         ret = mlx5_flow_validate_item_udp(items, item_flags,
2156                                                           next_protocol,
2157                                                           error);
2158                         if (ret < 0)
2159                                 return ret;
2160                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2161                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2162                         break;
2163                 case RTE_FLOW_ITEM_TYPE_GRE:
2164                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2165                         ret = mlx5_flow_validate_item_gre(items, item_flags,
2166                                                           next_protocol, error);
2167                         if (ret < 0)
2168                                 return ret;
2169                         last_item = MLX5_FLOW_LAYER_GRE;
2170                         break;
2171                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2172                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
2173                                                             error);
2174                         if (ret < 0)
2175                                 return ret;
2176                         last_item = MLX5_FLOW_LAYER_VXLAN;
2177                         break;
2178                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2179                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
2180                                                                 item_flags, dev,
2181                                                                 error);
2182                         if (ret < 0)
2183                                 return ret;
2184                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2185                         break;
2186                 case RTE_FLOW_ITEM_TYPE_MPLS:
2187                         ret = mlx5_flow_validate_item_mpls(dev, items,
2188                                                            item_flags,
2189                                                            last_item, error);
2190                         if (ret < 0)
2191                                 return ret;
2192                         last_item = MLX5_FLOW_LAYER_MPLS;
2193                         break;
2194                 case RTE_FLOW_ITEM_TYPE_META:
2195                         ret = flow_dv_validate_item_meta(dev, items, attr,
2196                                                          error);
2197                         if (ret < 0)
2198                                 return ret;
2199                         last_item = MLX5_FLOW_ITEM_METADATA;
2200                         break;
2201                 default:
2202                         return rte_flow_error_set(error, ENOTSUP,
2203                                                   RTE_FLOW_ERROR_TYPE_ITEM,
2204                                                   NULL, "item not supported");
2205                 }
2206                 item_flags |= last_item;
2207         }
2208         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
2209                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
2210                         return rte_flow_error_set(error, ENOTSUP,
2211                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2212                                                   actions, "too many actions");
2213                 switch (actions->type) {
2214                 case RTE_FLOW_ACTION_TYPE_VOID:
2215                         break;
2216                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
2217                         ret = flow_dv_validate_action_port_id(dev,
2218                                                               action_flags,
2219                                                               actions,
2220                                                               attr,
2221                                                               error);
2222                         if (ret)
2223                                 return ret;
2224                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
2225                         ++actions_n;
2226                         break;
2227                 case RTE_FLOW_ACTION_TYPE_FLAG:
2228                         ret = mlx5_flow_validate_action_flag(action_flags,
2229                                                              attr, error);
2230                         if (ret < 0)
2231                                 return ret;
2232                         action_flags |= MLX5_FLOW_ACTION_FLAG;
2233                         ++actions_n;
2234                         break;
2235                 case RTE_FLOW_ACTION_TYPE_MARK:
2236                         ret = mlx5_flow_validate_action_mark(actions,
2237                                                              action_flags,
2238                                                              attr, error);
2239                         if (ret < 0)
2240                                 return ret;
2241                         action_flags |= MLX5_FLOW_ACTION_MARK;
2242                         ++actions_n;
2243                         break;
2244                 case RTE_FLOW_ACTION_TYPE_DROP:
2245                         ret = mlx5_flow_validate_action_drop(action_flags,
2246                                                              attr, error);
2247                         if (ret < 0)
2248                                 return ret;
2249                         action_flags |= MLX5_FLOW_ACTION_DROP;
2250                         ++actions_n;
2251                         break;
2252                 case RTE_FLOW_ACTION_TYPE_QUEUE:
2253                         ret = mlx5_flow_validate_action_queue(actions,
2254                                                               action_flags, dev,
2255                                                               attr, error);
2256                         if (ret < 0)
2257                                 return ret;
2258                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
2259                         ++actions_n;
2260                         break;
2261                 case RTE_FLOW_ACTION_TYPE_RSS:
2262                         ret = mlx5_flow_validate_action_rss(actions,
2263                                                             action_flags, dev,
2264                                                             attr, item_flags,
2265                                                             error);
2266                         if (ret < 0)
2267                                 return ret;
2268                         action_flags |= MLX5_FLOW_ACTION_RSS;
2269                         ++actions_n;
2270                         break;
2271                 case RTE_FLOW_ACTION_TYPE_COUNT:
2272                         ret = flow_dv_validate_action_count(dev, error);
2273                         if (ret < 0)
2274                                 return ret;
2275                         action_flags |= MLX5_FLOW_ACTION_COUNT;
2276                         ++actions_n;
2277                         break;
2278                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
2279                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
2280                         ret = flow_dv_validate_action_l2_encap(action_flags,
2281                                                                actions, attr,
2282                                                                error);
2283                         if (ret < 0)
2284                                 return ret;
2285                         action_flags |= actions->type ==
2286                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2287                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2288                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2289                         ++actions_n;
2290                         break;
2291                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2292                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2293                         ret = flow_dv_validate_action_l2_decap(action_flags,
2294                                                                attr, error);
2295                         if (ret < 0)
2296                                 return ret;
2297                         action_flags |= actions->type ==
2298                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2299                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2300                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2301                         ++actions_n;
2302                         break;
2303                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2304                         ret = flow_dv_validate_action_raw_encap(action_flags,
2305                                                                 actions, attr,
2306                                                                 error);
2307                         if (ret < 0)
2308                                 return ret;
2309                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2310                         ++actions_n;
2311                         break;
2312                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2313                         ret = flow_dv_validate_action_raw_decap(action_flags,
2314                                                                 actions, attr,
2315                                                                 error);
2316                         if (ret < 0)
2317                                 return ret;
2318                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2319                         ++actions_n;
2320                         break;
2321                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2322                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2323                         ret = flow_dv_validate_action_modify_mac(action_flags,
2324                                                                  actions,
2325                                                                  item_flags,
2326                                                                  error);
2327                         if (ret < 0)
2328                                 return ret;
2329                         /* Count all modify-header actions as one action. */
2330                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2331                                 ++actions_n;
2332                         action_flags |= actions->type ==
2333                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2334                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2335                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2336                         break;
2337
2338                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2339                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2340                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2341                                                                   actions,
2342                                                                   item_flags,
2343                                                                   error);
2344                         if (ret < 0)
2345                                 return ret;
2346                         /* Count all modify-header actions as one action. */
2347                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2348                                 ++actions_n;
2349                         action_flags |= actions->type ==
2350                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2351                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2352                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2353                         break;
2354                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2355                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2356                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2357                                                                   actions,
2358                                                                   item_flags,
2359                                                                   error);
2360                         if (ret < 0)
2361                                 return ret;
2362                         /* Count all modify-header actions as one action. */
2363                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2364                                 ++actions_n;
2365                         action_flags |= actions->type ==
2366                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2367                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2368                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2369                         break;
2370                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2371                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2372                         ret = flow_dv_validate_action_modify_tp(action_flags,
2373                                                                 actions,
2374                                                                 item_flags,
2375                                                                 error);
2376                         if (ret < 0)
2377                                 return ret;
2378                         /* Count all modify-header actions as one action. */
2379                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2380                                 ++actions_n;
2381                         action_flags |= actions->type ==
2382                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2383                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2384                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2385                         break;
2386                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2387                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2388                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2389                                                                  actions,
2390                                                                  item_flags,
2391                                                                  error);
2392                         if (ret < 0)
2393                                 return ret;
2394                         /* Count all modify-header actions as one action. */
2395                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2396                                 ++actions_n;
2397                         action_flags |= actions->type ==
2398                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2399                                                 MLX5_FLOW_ACTION_SET_TTL :
2400                                                 MLX5_FLOW_ACTION_DEC_TTL;
2401                         break;
2402                 case RTE_FLOW_ACTION_TYPE_JUMP:
2403                         ret = flow_dv_validate_action_jump(actions,
2404                                                            attr->group, error);
2405                         if (ret)
2406                                 return ret;
2407                         ++actions_n;
2408                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2409                         break;
2410                 default:
2411                         return rte_flow_error_set(error, ENOTSUP,
2412                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2413                                                   actions,
2414                                                   "action not supported");
2415                 }
2416         }
2417         /* Eswitch has few restrictions on using items and actions */
2418         if (attr->transfer) {
2419                 if (action_flags & MLX5_FLOW_ACTION_FLAG)
2420                         return rte_flow_error_set(error, ENOTSUP,
2421                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2422                                                   NULL,
2423                                                   "unsupported action FLAG");
2424                 if (action_flags & MLX5_FLOW_ACTION_MARK)
2425                         return rte_flow_error_set(error, ENOTSUP,
2426                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2427                                                   NULL,
2428                                                   "unsupported action MARK");
2429                 if (action_flags & MLX5_FLOW_ACTION_QUEUE)
2430                         return rte_flow_error_set(error, ENOTSUP,
2431                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2432                                                   NULL,
2433                                                   "unsupported action QUEUE");
2434                 if (action_flags & MLX5_FLOW_ACTION_RSS)
2435                         return rte_flow_error_set(error, ENOTSUP,
2436                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2437                                                   NULL,
2438                                                   "unsupported action RSS");
2439                 if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2440                         return rte_flow_error_set(error, EINVAL,
2441                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2442                                                   actions,
2443                                                   "no fate action is found");
2444         } else {
2445                 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2446                         return rte_flow_error_set(error, EINVAL,
2447                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2448                                                   actions,
2449                                                   "no fate action is found");
2450         }
2451         return 0;
2452 }
2453
2454 /**
2455  * Internal preparation function. Allocates the DV flow size,
2456  * this size is constant.
2457  *
2458  * @param[in] attr
2459  *   Pointer to the flow attributes.
2460  * @param[in] items
2461  *   Pointer to the list of items.
2462  * @param[in] actions
2463  *   Pointer to the list of actions.
2464  * @param[out] error
2465  *   Pointer to the error structure.
2466  *
2467  * @return
2468  *   Pointer to mlx5_flow object on success,
2469  *   otherwise NULL and rte_errno is set.
2470  */
2471 static struct mlx5_flow *
2472 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2473                 const struct rte_flow_item items[] __rte_unused,
2474                 const struct rte_flow_action actions[] __rte_unused,
2475                 struct rte_flow_error *error)
2476 {
2477         uint32_t size = sizeof(struct mlx5_flow);
2478         struct mlx5_flow *flow;
2479
2480         flow = rte_calloc(__func__, 1, size, 0);
2481         if (!flow) {
2482                 rte_flow_error_set(error, ENOMEM,
2483                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2484                                    "not enough memory to create flow");
2485                 return NULL;
2486         }
2487         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2488         return flow;
2489 }
2490
2491 #ifndef NDEBUG
2492 /**
2493  * Sanity check for match mask and value. Similar to check_valid_spec() in
2494  * kernel driver. If unmasked bit is present in value, it returns failure.
2495  *
2496  * @param match_mask
2497  *   pointer to match mask buffer.
2498  * @param match_value
2499  *   pointer to match value buffer.
2500  *
2501  * @return
2502  *   0 if valid, -EINVAL otherwise.
2503  */
2504 static int
2505 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2506 {
2507         uint8_t *m = match_mask;
2508         uint8_t *v = match_value;
2509         unsigned int i;
2510
2511         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2512                 if (v[i] & ~m[i]) {
2513                         DRV_LOG(ERR,
2514                                 "match_value differs from match_criteria"
2515                                 " %p[%u] != %p[%u]",
2516                                 match_value, i, match_mask, i);
2517                         return -EINVAL;
2518                 }
2519         }
2520         return 0;
2521 }
2522 #endif
2523
2524 /**
2525  * Add Ethernet item to matcher and to the value.
2526  *
2527  * @param[in, out] matcher
2528  *   Flow matcher.
2529  * @param[in, out] key
2530  *   Flow matcher value.
2531  * @param[in] item
2532  *   Flow pattern to translate.
2533  * @param[in] inner
2534  *   Item is inner pattern.
2535  */
2536 static void
2537 flow_dv_translate_item_eth(void *matcher, void *key,
2538                            const struct rte_flow_item *item, int inner)
2539 {
2540         const struct rte_flow_item_eth *eth_m = item->mask;
2541         const struct rte_flow_item_eth *eth_v = item->spec;
2542         const struct rte_flow_item_eth nic_mask = {
2543                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2544                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2545                 .type = RTE_BE16(0xffff),
2546         };
2547         void *headers_m;
2548         void *headers_v;
2549         char *l24_v;
2550         unsigned int i;
2551
2552         if (!eth_v)
2553                 return;
2554         if (!eth_m)
2555                 eth_m = &nic_mask;
2556         if (inner) {
2557                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2558                                          inner_headers);
2559                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2560         } else {
2561                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2562                                          outer_headers);
2563                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2564         }
2565         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2566                &eth_m->dst, sizeof(eth_m->dst));
2567         /* The value must be in the range of the mask. */
2568         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2569         for (i = 0; i < sizeof(eth_m->dst); ++i)
2570                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2571         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2572                &eth_m->src, sizeof(eth_m->src));
2573         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2574         /* The value must be in the range of the mask. */
2575         for (i = 0; i < sizeof(eth_m->dst); ++i)
2576                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2577         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2578                  rte_be_to_cpu_16(eth_m->type));
2579         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2580         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2581 }
2582
2583 /**
2584  * Add VLAN item to matcher and to the value.
2585  *
2586  * @param[in, out] matcher
2587  *   Flow matcher.
2588  * @param[in, out] key
2589  *   Flow matcher value.
2590  * @param[in] item
2591  *   Flow pattern to translate.
2592  * @param[in] inner
2593  *   Item is inner pattern.
2594  */
2595 static void
2596 flow_dv_translate_item_vlan(void *matcher, void *key,
2597                             const struct rte_flow_item *item,
2598                             int inner)
2599 {
2600         const struct rte_flow_item_vlan *vlan_m = item->mask;
2601         const struct rte_flow_item_vlan *vlan_v = item->spec;
2602         const struct rte_flow_item_vlan nic_mask = {
2603                 .tci = RTE_BE16(0x0fff),
2604                 .inner_type = RTE_BE16(0xffff),
2605         };
2606         void *headers_m;
2607         void *headers_v;
2608         uint16_t tci_m;
2609         uint16_t tci_v;
2610
2611         if (!vlan_v)
2612                 return;
2613         if (!vlan_m)
2614                 vlan_m = &nic_mask;
2615         if (inner) {
2616                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2617                                          inner_headers);
2618                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2619         } else {
2620                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2621                                          outer_headers);
2622                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2623         }
2624         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2625         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2626         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2627         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2628         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2629         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2630         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2631         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2632         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2633         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2634 }
2635
/**
 * Add IPV4 item to matcher and to the value.
 *
 * Sets the L3 IPv4 fields (addresses, TOS, next protocol) in both the
 * matcher mask and the matcher value. The ip_version field is always
 * programmed, even when the item carries no spec.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 * @param[in] group
 *   The group to insert the rule.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask when the item has none: full addresses/TOS/proto. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 (root table) uses the full 4-bit ip_version mask while
	 * other groups mask only bit 0x4 — presumably a root-table matching
	 * restriction of the device; confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* Without a spec only the IP version above is matched. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	/* The value must be in the range of the mask. */
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte is split into ECN (low 2 bits) and DSCP (high 6 bits). */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
2713
2714 /**
2715  * Add IPV6 item to matcher and to the value.
2716  *
2717  * @param[in, out] matcher
2718  *   Flow matcher.
2719  * @param[in, out] key
2720  *   Flow matcher value.
2721  * @param[in] item
2722  *   Flow pattern to translate.
2723  * @param[in] inner
2724  *   Item is inner pattern.
2725  * @param[in] group
2726  *   The group to insert the rule.
2727  */
2728 static void
2729 flow_dv_translate_item_ipv6(void *matcher, void *key,
2730                             const struct rte_flow_item *item,
2731                             int inner, uint32_t group)
2732 {
2733         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2734         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2735         const struct rte_flow_item_ipv6 nic_mask = {
2736                 .hdr = {
2737                         .src_addr =
2738                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2739                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2740                         .dst_addr =
2741                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2742                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2743                         .vtc_flow = RTE_BE32(0xffffffff),
2744                         .proto = 0xff,
2745                         .hop_limits = 0xff,
2746                 },
2747         };
2748         void *headers_m;
2749         void *headers_v;
2750         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2751         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2752         char *l24_m;
2753         char *l24_v;
2754         uint32_t vtc_m;
2755         uint32_t vtc_v;
2756         int i;
2757         int size;
2758
2759         if (inner) {
2760                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2761                                          inner_headers);
2762                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2763         } else {
2764                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2765                                          outer_headers);
2766                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2767         }
2768         if (group == 0)
2769                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2770         else
2771                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2772         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2773         if (!ipv6_v)
2774                 return;
2775         if (!ipv6_m)
2776                 ipv6_m = &nic_mask;
2777         size = sizeof(ipv6_m->hdr.dst_addr);
2778         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2779                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2780         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2781                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2782         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2783         for (i = 0; i < size; ++i)
2784                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2785         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2786                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2787         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2788                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2789         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2790         for (i = 0; i < size; ++i)
2791                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2792         /* TOS. */
2793         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2794         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2795         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2796         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2797         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2798         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2799         /* Label. */
2800         if (inner) {
2801                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2802                          vtc_m);
2803                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2804                          vtc_v);
2805         } else {
2806                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2807                          vtc_m);
2808                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2809                          vtc_v);
2810         }
2811         /* Protocol. */
2812         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2813                  ipv6_m->hdr.proto);
2814         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2815                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2816 }
2817
2818 /**
2819  * Add TCP item to matcher and to the value.
2820  *
2821  * @param[in, out] matcher
2822  *   Flow matcher.
2823  * @param[in, out] key
2824  *   Flow matcher value.
2825  * @param[in] item
2826  *   Flow pattern to translate.
2827  * @param[in] inner
2828  *   Item is inner pattern.
2829  */
2830 static void
2831 flow_dv_translate_item_tcp(void *matcher, void *key,
2832                            const struct rte_flow_item *item,
2833                            int inner)
2834 {
2835         const struct rte_flow_item_tcp *tcp_m = item->mask;
2836         const struct rte_flow_item_tcp *tcp_v = item->spec;
2837         void *headers_m;
2838         void *headers_v;
2839
2840         if (inner) {
2841                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2842                                          inner_headers);
2843                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2844         } else {
2845                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2846                                          outer_headers);
2847                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2848         }
2849         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2850         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2851         if (!tcp_v)
2852                 return;
2853         if (!tcp_m)
2854                 tcp_m = &rte_flow_item_tcp_mask;
2855         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2856                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2857         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2858                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2859         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2860                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2861         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2862                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2863 }
2864
2865 /**
2866  * Add UDP item to matcher and to the value.
2867  *
2868  * @param[in, out] matcher
2869  *   Flow matcher.
2870  * @param[in, out] key
2871  *   Flow matcher value.
2872  * @param[in] item
2873  *   Flow pattern to translate.
2874  * @param[in] inner
2875  *   Item is inner pattern.
2876  */
2877 static void
2878 flow_dv_translate_item_udp(void *matcher, void *key,
2879                            const struct rte_flow_item *item,
2880                            int inner)
2881 {
2882         const struct rte_flow_item_udp *udp_m = item->mask;
2883         const struct rte_flow_item_udp *udp_v = item->spec;
2884         void *headers_m;
2885         void *headers_v;
2886
2887         if (inner) {
2888                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2889                                          inner_headers);
2890                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2891         } else {
2892                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2893                                          outer_headers);
2894                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2895         }
2896         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2897         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2898         if (!udp_v)
2899                 return;
2900         if (!udp_m)
2901                 udp_m = &rte_flow_item_udp_mask;
2902         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2903                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2904         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2905                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2906         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2907                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2908         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2909                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2910 }
2911
2912 /**
2913  * Add GRE item to matcher and to the value.
2914  *
2915  * @param[in, out] matcher
2916  *   Flow matcher.
2917  * @param[in, out] key
2918  *   Flow matcher value.
2919  * @param[in] item
2920  *   Flow pattern to translate.
2921  * @param[in] inner
2922  *   Item is inner pattern.
2923  */
2924 static void
2925 flow_dv_translate_item_gre(void *matcher, void *key,
2926                            const struct rte_flow_item *item,
2927                            int inner)
2928 {
2929         const struct rte_flow_item_gre *gre_m = item->mask;
2930         const struct rte_flow_item_gre *gre_v = item->spec;
2931         void *headers_m;
2932         void *headers_v;
2933         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2934         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2935
2936         if (inner) {
2937                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2938                                          inner_headers);
2939                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2940         } else {
2941                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2942                                          outer_headers);
2943                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2944         }
2945         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2946         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2947         if (!gre_v)
2948                 return;
2949         if (!gre_m)
2950                 gre_m = &rte_flow_item_gre_mask;
2951         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2952                  rte_be_to_cpu_16(gre_m->protocol));
2953         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2954                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2955 }
2956
2957 /**
2958  * Add NVGRE item to matcher and to the value.
2959  *
2960  * @param[in, out] matcher
2961  *   Flow matcher.
2962  * @param[in, out] key
2963  *   Flow matcher value.
2964  * @param[in] item
2965  *   Flow pattern to translate.
2966  * @param[in] inner
2967  *   Item is inner pattern.
2968  */
2969 static void
2970 flow_dv_translate_item_nvgre(void *matcher, void *key,
2971                              const struct rte_flow_item *item,
2972                              int inner)
2973 {
2974         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2975         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2976         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2977         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2978         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
2979         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
2980         char *gre_key_m;
2981         char *gre_key_v;
2982         int size;
2983         int i;
2984
2985         flow_dv_translate_item_gre(matcher, key, item, inner);
2986         if (!nvgre_v)
2987                 return;
2988         if (!nvgre_m)
2989                 nvgre_m = &rte_flow_item_nvgre_mask;
2990         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2991         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2992         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2993         memcpy(gre_key_m, tni_flow_id_m, size);
2994         for (i = 0; i < size; ++i)
2995                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
2996 }
2997
2998 /**
2999  * Add VXLAN item to matcher and to the value.
3000  *
3001  * @param[in, out] matcher
3002  *   Flow matcher.
3003  * @param[in, out] key
3004  *   Flow matcher value.
3005  * @param[in] item
3006  *   Flow pattern to translate.
3007  * @param[in] inner
3008  *   Item is inner pattern.
3009  */
3010 static void
3011 flow_dv_translate_item_vxlan(void *matcher, void *key,
3012                              const struct rte_flow_item *item,
3013                              int inner)
3014 {
3015         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
3016         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
3017         void *headers_m;
3018         void *headers_v;
3019         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3020         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3021         char *vni_m;
3022         char *vni_v;
3023         uint16_t dport;
3024         int size;
3025         int i;
3026
3027         if (inner) {
3028                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3029                                          inner_headers);
3030                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3031         } else {
3032                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3033                                          outer_headers);
3034                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3035         }
3036         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
3037                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
3038         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
3039                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
3040                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
3041         }
3042         if (!vxlan_v)
3043                 return;
3044         if (!vxlan_m)
3045                 vxlan_m = &rte_flow_item_vxlan_mask;
3046         size = sizeof(vxlan_m->vni);
3047         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
3048         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
3049         memcpy(vni_m, vxlan_m->vni, size);
3050         for (i = 0; i < size; ++i)
3051                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
3052 }
3053
3054 /**
3055  * Add MPLS item to matcher and to the value.
3056  *
3057  * @param[in, out] matcher
3058  *   Flow matcher.
3059  * @param[in, out] key
3060  *   Flow matcher value.
3061  * @param[in] item
3062  *   Flow pattern to translate.
3063  * @param[in] prev_layer
3064  *   The protocol layer indicated in previous item.
3065  * @param[in] inner
3066  *   Item is inner pattern.
3067  */
3068 static void
3069 flow_dv_translate_item_mpls(void *matcher, void *key,
3070                             const struct rte_flow_item *item,
3071                             uint64_t prev_layer,
3072                             int inner)
3073 {
3074         const uint32_t *in_mpls_m = item->mask;
3075         const uint32_t *in_mpls_v = item->spec;
3076         uint32_t *out_mpls_m = 0;
3077         uint32_t *out_mpls_v = 0;
3078         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3079         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3080         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3081                                      misc_parameters_2);
3082         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3083         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3084         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3085
3086         switch (prev_layer) {
3087         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3088                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3089                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3090                          MLX5_UDP_PORT_MPLS);
3091                 break;
3092         case MLX5_FLOW_LAYER_GRE:
3093                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3094                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3095                          ETHER_TYPE_MPLS);
3096                 break;
3097         default:
3098                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3099                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3100                          IPPROTO_MPLS);
3101                 break;
3102         }
3103         if (!in_mpls_v)
3104                 return;
3105         if (!in_mpls_m)
3106                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3107         switch (prev_layer) {
3108         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3109                 out_mpls_m =
3110                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3111                                                  outer_first_mpls_over_udp);
3112                 out_mpls_v =
3113                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3114                                                  outer_first_mpls_over_udp);
3115                 break;
3116         case MLX5_FLOW_LAYER_GRE:
3117                 out_mpls_m =
3118                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3119                                                  outer_first_mpls_over_gre);
3120                 out_mpls_v =
3121                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3122                                                  outer_first_mpls_over_gre);
3123                 break;
3124         default:
3125                 /* Inner MPLS not over GRE is not supported. */
3126                 if (!inner) {
3127                         out_mpls_m =
3128                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3129                                                          misc2_m,
3130                                                          outer_first_mpls);
3131                         out_mpls_v =
3132                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3133                                                          misc2_v,
3134                                                          outer_first_mpls);
3135                 }
3136                 break;
3137         }
3138         if (out_mpls_m && out_mpls_v) {
3139                 *out_mpls_m = *in_mpls_m;
3140                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3141         }
3142 }
3143
3144 /**
3145  * Add META item to matcher
3146  *
3147  * @param[in, out] matcher
3148  *   Flow matcher.
3149  * @param[in, out] key
3150  *   Flow matcher value.
3151  * @param[in] item
3152  *   Flow pattern to translate.
3153  * @param[in] inner
3154  *   Item is inner pattern.
3155  */
3156 static void
3157 flow_dv_translate_item_meta(void *matcher, void *key,
3158                             const struct rte_flow_item *item)
3159 {
3160         const struct rte_flow_item_meta *meta_m;
3161         const struct rte_flow_item_meta *meta_v;
3162         void *misc2_m =
3163                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
3164         void *misc2_v =
3165                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3166
3167         meta_m = (const void *)item->mask;
3168         if (!meta_m)
3169                 meta_m = &rte_flow_item_meta_mask;
3170         meta_v = (const void *)item->spec;
3171         if (meta_v) {
3172                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
3173                          rte_be_to_cpu_32(meta_m->data));
3174                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
3175                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
3176         }
3177 }
3178
3179 /**
3180  * Add source vport match to the specified matcher.
3181  *
3182  * @param[in, out] matcher
3183  *   Flow matcher.
3184  * @param[in, out] key
3185  *   Flow matcher value.
3186  * @param[in] port
3187  *   Source vport value to match
3188  * @param[in] mask
3189  *   Mask
3190  */
3191 static void
3192 flow_dv_translate_item_source_vport(void *matcher, void *key,
3193                                     int16_t port, uint16_t mask)
3194 {
3195         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3196         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3197
3198         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3199         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3200 }
3201
3202 /**
3203  * Translate port-id item to eswitch match on  port-id.
3204  *
3205  * @param[in] dev
3206  *   The devich to configure through.
3207  * @param[in, out] matcher
3208  *   Flow matcher.
3209  * @param[in, out] key
3210  *   Flow matcher value.
3211  * @param[in] item
3212  *   Flow pattern to translate.
3213  *
3214  * @return
3215  *   0 on success, a negative errno value otherwise.
3216  */
3217 static int
3218 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3219                                void *key, const struct rte_flow_item *item)
3220 {
3221         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3222         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3223         uint16_t mask, val, id;
3224         int ret;
3225
3226         mask = pid_m ? pid_m->id : 0xffff;
3227         id = pid_v ? pid_v->id : dev->data->port_id;
3228         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3229         if (ret)
3230                 return ret;
3231         flow_dv_translate_item_source_vport(matcher, key, val, mask);
3232         return 0;
3233 }
3234
3235 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
3236
3237 #define HEADER_IS_ZERO(match_criteria, headers)                              \
3238         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
3239                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3240
3241 /**
3242  * Calculate flow matcher enable bitmap.
3243  *
3244  * @param match_criteria
3245  *   Pointer to flow matcher criteria.
3246  *
3247  * @return
3248  *   Bitmap of enabled fields.
3249  */
3250 static uint8_t
3251 flow_dv_matcher_enable(uint32_t *match_criteria)
3252 {
3253         uint8_t match_criteria_enable;
3254
3255         match_criteria_enable =
3256                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3257                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3258         match_criteria_enable |=
3259                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3260                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3261         match_criteria_enable |=
3262                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3263                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3264         match_criteria_enable |=
3265                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3266                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3267 #ifdef HAVE_MLX5DV_DR
3268         match_criteria_enable |=
3269                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3270                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3271 #endif
3272         return match_criteria_enable;
3273 }
3274
3275
3276 /**
3277  * Get a flow table.
3278  *
3279  * @param dev[in, out]
3280  *   Pointer to rte_eth_dev structure.
3281  * @param[in] table_id
3282  *   Table id to use.
3283  * @param[in] egress
3284  *   Direction of the table.
3285  * @param[in] transfer
3286  *   E-Switch or NIC flow.
3287  * @param[out] error
3288  *   pointer to error structure.
3289  *
3290  * @return
3291  *   Returns tables resource based on the index, NULL in case of failed.
3292  */
3293 static struct mlx5_flow_tbl_resource *
3294 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
3295                          uint32_t table_id, uint8_t egress,
3296                          uint8_t transfer,
3297                          struct rte_flow_error *error)
3298 {
3299         struct mlx5_priv *priv = dev->data->dev_private;
3300         struct mlx5_ibv_shared *sh = priv->sh;
3301         struct mlx5_flow_tbl_resource *tbl;
3302
3303 #ifdef HAVE_MLX5DV_DR
3304         if (transfer) {
3305                 tbl = &sh->fdb_tbl[table_id];
3306                 if (!tbl->obj)
3307                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3308                                 (sh->fdb_ns, table_id);
3309         } else if (egress) {
3310                 tbl = &sh->tx_tbl[table_id];
3311                 if (!tbl->obj)
3312                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3313                                 (sh->tx_ns, table_id);
3314         } else {
3315                 tbl = &sh->rx_tbl[table_id];
3316                 if (!tbl->obj)
3317                         tbl->obj = mlx5_glue->dr_create_flow_tbl
3318                                 (sh->rx_ns, table_id);
3319         }
3320         if (!tbl->obj) {
3321                 rte_flow_error_set(error, ENOMEM,
3322                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3323                                    NULL, "cannot create table");
3324                 return NULL;
3325         }
3326         rte_atomic32_inc(&tbl->refcnt);
3327         return tbl;
3328 #else
3329         (void)error;
3330         (void)tbl;
3331         if (transfer)
3332                 return &sh->fdb_tbl[table_id];
3333         else if (egress)
3334                 return &sh->tx_tbl[table_id];
3335         else
3336                 return &sh->rx_tbl[table_id];
3337 #endif
3338 }
3339
3340 /**
3341  * Release a flow table.
3342  *
3343  * @param[in] tbl
3344  *   Table resource to be released.
3345  *
3346  * @return
3347  *   Returns 0 if table was released, else return 1;
3348  */
3349 static int
3350 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3351 {
3352         if (!tbl)
3353                 return 0;
3354         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3355                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3356                 tbl->obj = NULL;
3357                 return 0;
3358         }
3359         return 1;
3360 }
3361
3362 /**
3363  * Register the flow matcher.
3364  *
3365  * @param dev[in, out]
3366  *   Pointer to rte_eth_dev structure.
3367  * @param[in, out] matcher
3368  *   Pointer to flow matcher.
3369  * @parm[in, out] dev_flow
3370  *   Pointer to the dev_flow.
3371  * @param[out] error
3372  *   pointer to error structure.
3373  *
3374  * @return
3375  *   0 on success otherwise -errno and errno is set.
3376  */
3377 static int
3378 flow_dv_matcher_register(struct rte_eth_dev *dev,
3379                          struct mlx5_flow_dv_matcher *matcher,
3380                          struct mlx5_flow *dev_flow,
3381                          struct rte_flow_error *error)
3382 {
3383         struct mlx5_priv *priv = dev->data->dev_private;
3384         struct mlx5_ibv_shared *sh = priv->sh;
3385         struct mlx5_flow_dv_matcher *cache_matcher;
3386         struct mlx5dv_flow_matcher_attr dv_attr = {
3387                 .type = IBV_FLOW_ATTR_NORMAL,
3388                 .match_mask = (void *)&matcher->mask,
3389         };
3390         struct mlx5_flow_tbl_resource *tbl = NULL;
3391
3392         /* Lookup from cache. */
3393         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3394                 if (matcher->crc == cache_matcher->crc &&
3395                     matcher->priority == cache_matcher->priority &&
3396                     matcher->egress == cache_matcher->egress &&
3397                     matcher->group == cache_matcher->group &&
3398                     matcher->transfer == cache_matcher->transfer &&
3399                     !memcmp((const void *)matcher->mask.buf,
3400                             (const void *)cache_matcher->mask.buf,
3401                             cache_matcher->mask.size)) {
3402                         DRV_LOG(DEBUG,
3403                                 "priority %hd use %s matcher %p: refcnt %d++",
3404                                 cache_matcher->priority,
3405                                 cache_matcher->egress ? "tx" : "rx",
3406                                 (void *)cache_matcher,
3407                                 rte_atomic32_read(&cache_matcher->refcnt));
3408                         rte_atomic32_inc(&cache_matcher->refcnt);
3409                         dev_flow->dv.matcher = cache_matcher;
3410                         return 0;
3411                 }
3412         }
3413         /* Register new matcher. */
3414         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3415         if (!cache_matcher)
3416                 return rte_flow_error_set(error, ENOMEM,
3417                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3418                                           "cannot allocate matcher memory");
3419         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3420                                        matcher->egress, matcher->transfer,
3421                                        error);
3422         if (!tbl) {
3423                 rte_free(cache_matcher);
3424                 return rte_flow_error_set(error, ENOMEM,
3425                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3426                                           NULL, "cannot create table");
3427         }
3428         *cache_matcher = *matcher;
3429         dv_attr.match_criteria_enable =
3430                 flow_dv_matcher_enable(cache_matcher->mask.buf);
3431         dv_attr.priority = matcher->priority;
3432         if (matcher->egress)
3433                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3434         cache_matcher->matcher_object =
3435                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3436         if (!cache_matcher->matcher_object) {
3437                 rte_free(cache_matcher);
3438 #ifdef HAVE_MLX5DV_DR
3439                 flow_dv_tbl_resource_release(tbl);
3440 #endif
3441                 return rte_flow_error_set(error, ENOMEM,
3442                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3443                                           NULL, "cannot create matcher");
3444         }
3445         rte_atomic32_inc(&cache_matcher->refcnt);
3446         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3447         dev_flow->dv.matcher = cache_matcher;
3448         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3449                 cache_matcher->priority,
3450                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3451                 rte_atomic32_read(&cache_matcher->refcnt));
3452         rte_atomic32_inc(&tbl->refcnt);
3453         return 0;
3454 }
3455
3456 /**
3457  * Find existing tag resource or create and register a new one.
3458  *
3459  * @param dev[in, out]
3460  *   Pointer to rte_eth_dev structure.
3461  * @param[in, out] resource
3462  *   Pointer to tag resource.
3463  * @parm[in, out] dev_flow
3464  *   Pointer to the dev_flow.
3465  * @param[out] error
3466  *   pointer to error structure.
3467  *
3468  * @return
3469  *   0 on success otherwise -errno and errno is set.
3470  */
3471 static int
3472 flow_dv_tag_resource_register
3473                         (struct rte_eth_dev *dev,
3474                          struct mlx5_flow_dv_tag_resource *resource,
3475                          struct mlx5_flow *dev_flow,
3476                          struct rte_flow_error *error)
3477 {
3478         struct mlx5_priv *priv = dev->data->dev_private;
3479         struct mlx5_ibv_shared *sh = priv->sh;
3480         struct mlx5_flow_dv_tag_resource *cache_resource;
3481
3482         /* Lookup a matching resource from cache. */
3483         LIST_FOREACH(cache_resource, &sh->tags, next) {
3484                 if (resource->tag == cache_resource->tag) {
3485                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3486                                 (void *)cache_resource,
3487                                 rte_atomic32_read(&cache_resource->refcnt));
3488                         rte_atomic32_inc(&cache_resource->refcnt);
3489                         dev_flow->flow->tag_resource = cache_resource;
3490                         return 0;
3491                 }
3492         }
3493         /* Register new  resource. */
3494         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3495         if (!cache_resource)
3496                 return rte_flow_error_set(error, ENOMEM,
3497                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3498                                           "cannot allocate resource memory");
3499         *cache_resource = *resource;
3500         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3501                 (resource->tag);
3502         if (!cache_resource->action) {
3503                 rte_free(cache_resource);
3504                 return rte_flow_error_set(error, ENOMEM,
3505                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3506                                           NULL, "cannot create action");
3507         }
3508         rte_atomic32_init(&cache_resource->refcnt);
3509         rte_atomic32_inc(&cache_resource->refcnt);
3510         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3511         dev_flow->flow->tag_resource = cache_resource;
3512         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3513                 (void *)cache_resource,
3514                 rte_atomic32_read(&cache_resource->refcnt));
3515         return 0;
3516 }
3517
3518 /**
3519  * Release the tag.
3520  *
3521  * @param dev
3522  *   Pointer to Ethernet device.
3523  * @param flow
3524  *   Pointer to mlx5_flow.
3525  *
3526  * @return
3527  *   1 while a reference on it exists, 0 when freed.
3528  */
3529 static int
3530 flow_dv_tag_release(struct rte_eth_dev *dev,
3531                     struct mlx5_flow_dv_tag_resource *tag)
3532 {
3533         assert(tag);
3534         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3535                 dev->data->port_id, (void *)tag,
3536                 rte_atomic32_read(&tag->refcnt));
3537         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3538                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3539                 LIST_REMOVE(tag, next);
3540                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3541                         dev->data->port_id, (void *)tag);
3542                 rte_free(tag);
3543                 return 0;
3544         }
3545         return 1;
3546 }
3547
3548 /**
3549  * Translate port ID action to vport.
3550  *
3551  * @param[in] dev
3552  *   Pointer to rte_eth_dev structure.
3553  * @param[in] action
3554  *   Pointer to the port ID action.
3555  * @param[out] dst_port_id
3556  *   The target port ID.
3557  * @param[out] error
3558  *   Pointer to the error structure.
3559  *
3560  * @return
3561  *   0 on success, a negative errno value otherwise and rte_errno is set.
3562  */
3563 static int
3564 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
3565                                  const struct rte_flow_action *action,
3566                                  uint32_t *dst_port_id,
3567                                  struct rte_flow_error *error)
3568 {
3569         uint32_t port;
3570         uint16_t port_id;
3571         int ret;
3572         const struct rte_flow_action_port_id *conf =
3573                         (const struct rte_flow_action_port_id *)action->conf;
3574
3575         port = conf->original ? dev->data->port_id : conf->id;
3576         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
3577         if (ret)
3578                 return rte_flow_error_set(error, -ret,
3579                                           RTE_FLOW_ERROR_TYPE_ACTION,
3580                                           NULL,
3581                                           "No eswitch info was found for port");
3582         *dst_port_id = port_id;
3583         return 0;
3584 }
3585
3586 /**
3587  * Fill the flow with DV spec.
3588  *
3589  * @param[in] dev
3590  *   Pointer to rte_eth_dev structure.
3591  * @param[in, out] dev_flow
3592  *   Pointer to the sub flow.
3593  * @param[in] attr
3594  *   Pointer to the flow attributes.
3595  * @param[in] items
3596  *   Pointer to the list of items.
3597  * @param[in] actions
3598  *   Pointer to the list of actions.
3599  * @param[out] error
3600  *   Pointer to the error structure.
3601  *
3602  * @return
3603  *   0 on success, a negative errno value otherwise and rte_errno is set.
3604  */
3605 static int
3606 flow_dv_translate(struct rte_eth_dev *dev,
3607                   struct mlx5_flow *dev_flow,
3608                   const struct rte_flow_attr *attr,
3609                   const struct rte_flow_item items[],
3610                   const struct rte_flow_action actions[],
3611                   struct rte_flow_error *error)
3612 {
3613         struct mlx5_priv *priv = dev->data->dev_private;
3614         struct rte_flow *flow = dev_flow->flow;
3615         uint64_t item_flags = 0;
3616         uint64_t last_item = 0;
3617         uint64_t action_flags = 0;
3618         uint64_t priority = attr->priority;
3619         struct mlx5_flow_dv_matcher matcher = {
3620                 .mask = {
3621                         .size = sizeof(matcher.mask.buf),
3622                 },
3623         };
3624         int actions_n = 0;
3625         bool actions_end = false;
3626         struct mlx5_flow_dv_modify_hdr_resource res = {
3627                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3628                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3629         };
3630         union flow_dv_attr flow_attr = { .attr = 0 };
3631         struct mlx5_flow_dv_tag_resource tag_resource;
3632         uint32_t modify_action_position = UINT32_MAX;
3633         void *match_mask = matcher.mask.buf;
3634         void *match_value = dev_flow->dv.value.buf;
3635
3636         flow->group = attr->group;
3637         if (attr->transfer)
3638                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
3639         if (priority == MLX5_FLOW_PRIO_RSVD)
3640                 priority = priv->config.flow_prio - 1;
3641         for (; !actions_end ; actions++) {
3642                 const struct rte_flow_action_queue *queue;
3643                 const struct rte_flow_action_rss *rss;
3644                 const struct rte_flow_action *action = actions;
3645                 const struct rte_flow_action_count *count = action->conf;
3646                 const uint8_t *rss_key;
3647                 const struct rte_flow_action_jump *jump_data;
3648                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3649                 struct mlx5_flow_tbl_resource *tbl;
3650                 uint32_t port_id = 0;
3651                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
3652
3653                 switch (actions->type) {
3654                 case RTE_FLOW_ACTION_TYPE_VOID:
3655                         break;
3656                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
3657                         if (flow_dv_translate_action_port_id(dev, action,
3658                                                              &port_id, error))
3659                                 return -rte_errno;
3660                         port_id_resource.port_id = port_id;
3661                         if (flow_dv_port_id_action_resource_register
3662                             (dev, &port_id_resource, dev_flow, error))
3663                                 return -rte_errno;
3664                         dev_flow->dv.actions[actions_n++] =
3665                                 dev_flow->dv.port_id_action->action;
3666                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
3667                         break;
3668                 case RTE_FLOW_ACTION_TYPE_FLAG:
3669                         tag_resource.tag =
3670                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3671                         if (!flow->tag_resource)
3672                                 if (flow_dv_tag_resource_register
3673                                     (dev, &tag_resource, dev_flow, error))
3674                                         return errno;
3675                         dev_flow->dv.actions[actions_n++] =
3676                                 flow->tag_resource->action;
3677                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3678                         break;
3679                 case RTE_FLOW_ACTION_TYPE_MARK:
3680                         tag_resource.tag = mlx5_flow_mark_set
3681                               (((const struct rte_flow_action_mark *)
3682                                (actions->conf))->id);
3683                         if (!flow->tag_resource)
3684                                 if (flow_dv_tag_resource_register
3685                                     (dev, &tag_resource, dev_flow, error))
3686                                         return errno;
3687                         dev_flow->dv.actions[actions_n++] =
3688                                 flow->tag_resource->action;
3689                         action_flags |= MLX5_FLOW_ACTION_MARK;
3690                         break;
3691                 case RTE_FLOW_ACTION_TYPE_DROP:
3692                         action_flags |= MLX5_FLOW_ACTION_DROP;
3693                         break;
3694                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3695                         queue = actions->conf;
3696                         flow->rss.queue_num = 1;
3697                         (*flow->queue)[0] = queue->index;
3698                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3699                         break;
3700                 case RTE_FLOW_ACTION_TYPE_RSS:
3701                         rss = actions->conf;
3702                         if (flow->queue)
3703                                 memcpy((*flow->queue), rss->queue,
3704                                        rss->queue_num * sizeof(uint16_t));
3705                         flow->rss.queue_num = rss->queue_num;
3706                         /* NULL RSS key indicates default RSS key. */
3707                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3708                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3709                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3710                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3711                         flow->rss.level = rss->level;
3712                         action_flags |= MLX5_FLOW_ACTION_RSS;
3713                         break;
3714                 case RTE_FLOW_ACTION_TYPE_COUNT:
3715                         if (!priv->config.devx) {
3716                                 rte_errno = ENOTSUP;
3717                                 goto cnt_err;
3718                         }
3719                         flow->counter = flow_dv_counter_new(dev, count->shared,
3720                                                             count->id);
3721                         if (flow->counter == NULL)
3722                                 goto cnt_err;
3723                         dev_flow->dv.actions[actions_n++] =
3724                                 flow->counter->action;
3725                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3726                         break;
3727 cnt_err:
3728                         if (rte_errno == ENOTSUP)
3729                                 return rte_flow_error_set
3730                                               (error, ENOTSUP,
3731                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3732                                                NULL,
3733                                                "count action not supported");
3734                         else
3735                                 return rte_flow_error_set
3736                                                 (error, rte_errno,
3737                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3738                                                  action,
3739                                                  "cannot create counter"
3740                                                   " object.");
3741                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3742                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3743                         if (flow_dv_create_action_l2_encap(dev, actions,
3744                                                            dev_flow,
3745                                                            attr->transfer,
3746                                                            error))
3747                                 return -rte_errno;
3748                         dev_flow->dv.actions[actions_n++] =
3749                                 dev_flow->dv.encap_decap->verbs_action;
3750                         action_flags |= actions->type ==
3751                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3752                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3753                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3754                         break;
3755                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3756                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3757                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3758                                                            attr->transfer,
3759                                                            error))
3760                                 return -rte_errno;
3761                         dev_flow->dv.actions[actions_n++] =
3762                                 dev_flow->dv.encap_decap->verbs_action;
3763                         action_flags |= actions->type ==
3764                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3765                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3766                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3767                         break;
3768                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3769                         /* Handle encap with preceding decap. */
3770                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3771                                 if (flow_dv_create_action_raw_encap
3772                                         (dev, actions, dev_flow, attr, error))
3773                                         return -rte_errno;
3774                                 dev_flow->dv.actions[actions_n++] =
3775                                         dev_flow->dv.encap_decap->verbs_action;
3776                         } else {
3777                                 /* Handle encap without preceding decap. */
3778                                 if (flow_dv_create_action_l2_encap
3779                                     (dev, actions, dev_flow, attr->transfer,
3780                                      error))
3781                                         return -rte_errno;
3782                                 dev_flow->dv.actions[actions_n++] =
3783                                         dev_flow->dv.encap_decap->verbs_action;
3784                         }
3785                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3786                         break;
3787                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3788                         /* Check if this decap is followed by encap. */
3789                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3790                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3791                                action++) {
3792                         }
3793                         /* Handle decap only if it isn't followed by encap. */
3794                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3795                                 if (flow_dv_create_action_l2_decap
3796                                     (dev, dev_flow, attr->transfer, error))
3797                                         return -rte_errno;
3798                                 dev_flow->dv.actions[actions_n++] =
3799                                         dev_flow->dv.encap_decap->verbs_action;
3800                         }
3801                         /* If decap is followed by encap, handle it at encap. */
3802                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3803                         break;
3804                 case RTE_FLOW_ACTION_TYPE_JUMP:
3805                         jump_data = action->conf;
3806                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3807                                                        MLX5_GROUP_FACTOR,
3808                                                        attr->egress,
3809                                                        attr->transfer, error);
3810                         if (!tbl)
3811                                 return rte_flow_error_set
3812                                                 (error, errno,
3813                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3814                                                  NULL,
3815                                                  "cannot create jump action.");
3816                         jump_tbl_resource.tbl = tbl;
3817                         if (flow_dv_jump_tbl_resource_register
3818                             (dev, &jump_tbl_resource, dev_flow, error)) {
3819                                 flow_dv_tbl_resource_release(tbl);
3820                                 return rte_flow_error_set
3821                                                 (error, errno,
3822                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3823                                                  NULL,
3824                                                  "cannot create jump action.");
3825                         }
3826                         dev_flow->dv.actions[actions_n++] =
3827                                 dev_flow->dv.jump->action;
3828                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3829                         break;
3830                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3831                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3832                         if (flow_dv_convert_action_modify_mac(&res, actions,
3833                                                               error))
3834                                 return -rte_errno;
3835                         action_flags |= actions->type ==
3836                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3837                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3838                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3839                         break;
3840                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3841                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3842                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3843                                                                error))
3844                                 return -rte_errno;
3845                         action_flags |= actions->type ==
3846                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3847                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3848                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3849                         break;
3850                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3851                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3852                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3853                                                                error))
3854                                 return -rte_errno;
3855                         action_flags |= actions->type ==
3856                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3857                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3858                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3859                         break;
3860                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3861                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3862                         if (flow_dv_convert_action_modify_tp(&res, actions,
3863                                                              items, &flow_attr,
3864                                                              error))
3865                                 return -rte_errno;
3866                         action_flags |= actions->type ==
3867                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3868                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3869                                         MLX5_FLOW_ACTION_SET_TP_DST;
3870                         break;
3871                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3872                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3873                                                                   &flow_attr,
3874                                                                   error))
3875                                 return -rte_errno;
3876                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3877                         break;
3878                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3879                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3880                                                              items, &flow_attr,
3881                                                              error))
3882                                 return -rte_errno;
3883                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3884                         break;
3885                 case RTE_FLOW_ACTION_TYPE_END:
3886                         actions_end = true;
3887                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3888                                 /* create modify action if needed. */
3889                                 if (flow_dv_modify_hdr_resource_register
3890                                                                 (dev, &res,
3891                                                                  dev_flow,
3892                                                                  error))
3893                                         return -rte_errno;
3894                                 dev_flow->dv.actions[modify_action_position] =
3895                                         dev_flow->dv.modify_hdr->verbs_action;
3896                         }
3897                         break;
3898                 default:
3899                         break;
3900                 }
3901                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3902                     modify_action_position == UINT32_MAX)
3903                         modify_action_position = actions_n++;
3904         }
3905         dev_flow->dv.actions_n = actions_n;
3906         flow->actions = action_flags;
3907         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3908                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3909
3910                 switch (items->type) {
3911                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
3912                         flow_dv_translate_item_port_id(dev, match_mask,
3913                                                        match_value, items);
3914                         last_item = MLX5_FLOW_ITEM_PORT_ID;
3915                         break;
3916                 case RTE_FLOW_ITEM_TYPE_ETH:
3917                         flow_dv_translate_item_eth(match_mask, match_value,
3918                                                    items, tunnel);
3919                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3920                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3921                                              MLX5_FLOW_LAYER_OUTER_L2;
3922                         break;
3923                 case RTE_FLOW_ITEM_TYPE_VLAN:
3924                         flow_dv_translate_item_vlan(match_mask, match_value,
3925                                                     items, tunnel);
3926                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3927                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3928                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3929                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3930                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3931                         break;
3932                 case RTE_FLOW_ITEM_TYPE_IPV4:
3933                         flow_dv_translate_item_ipv4(match_mask, match_value,
3934                                                     items, tunnel, attr->group);
3935                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3936                         dev_flow->dv.hash_fields |=
3937                                 mlx5_flow_hashfields_adjust
3938                                         (dev_flow, tunnel,
3939                                          MLX5_IPV4_LAYER_TYPES,
3940                                          MLX5_IPV4_IBV_RX_HASH);
3941                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3942                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3943                         break;
3944                 case RTE_FLOW_ITEM_TYPE_IPV6:
3945                         flow_dv_translate_item_ipv6(match_mask, match_value,
3946                                                     items, tunnel, attr->group);
3947                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3948                         dev_flow->dv.hash_fields |=
3949                                 mlx5_flow_hashfields_adjust
3950                                         (dev_flow, tunnel,
3951                                          MLX5_IPV6_LAYER_TYPES,
3952                                          MLX5_IPV6_IBV_RX_HASH);
3953                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3954                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3955                         break;
3956                 case RTE_FLOW_ITEM_TYPE_TCP:
3957                         flow_dv_translate_item_tcp(match_mask, match_value,
3958                                                    items, tunnel);
3959                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3960                         dev_flow->dv.hash_fields |=
3961                                 mlx5_flow_hashfields_adjust
3962                                         (dev_flow, tunnel, ETH_RSS_TCP,
3963                                          IBV_RX_HASH_SRC_PORT_TCP |
3964                                          IBV_RX_HASH_DST_PORT_TCP);
3965                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3966                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3967                         break;
3968                 case RTE_FLOW_ITEM_TYPE_UDP:
3969                         flow_dv_translate_item_udp(match_mask, match_value,
3970                                                    items, tunnel);
3971                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3972                         dev_flow->dv.hash_fields |=
3973                                 mlx5_flow_hashfields_adjust
3974                                         (dev_flow, tunnel, ETH_RSS_UDP,
3975                                          IBV_RX_HASH_SRC_PORT_UDP |
3976                                          IBV_RX_HASH_DST_PORT_UDP);
3977                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3978                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3979                         break;
3980                 case RTE_FLOW_ITEM_TYPE_GRE:
3981                         flow_dv_translate_item_gre(match_mask, match_value,
3982                                                    items, tunnel);
3983                         last_item = MLX5_FLOW_LAYER_GRE;
3984                         break;
3985                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3986                         flow_dv_translate_item_nvgre(match_mask, match_value,
3987                                                      items, tunnel);
3988                         last_item = MLX5_FLOW_LAYER_GRE;
3989                         break;
3990                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3991                         flow_dv_translate_item_vxlan(match_mask, match_value,
3992                                                      items, tunnel);
3993                         last_item = MLX5_FLOW_LAYER_VXLAN;
3994                         break;
3995                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3996                         flow_dv_translate_item_vxlan(match_mask, match_value,
3997                                                      items, tunnel);
3998                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3999                         break;
4000                 case RTE_FLOW_ITEM_TYPE_MPLS:
4001                         flow_dv_translate_item_mpls(match_mask, match_value,
4002                                                     items, last_item, tunnel);
4003                         last_item = MLX5_FLOW_LAYER_MPLS;
4004                         break;
4005                 case RTE_FLOW_ITEM_TYPE_META:
4006                         flow_dv_translate_item_meta(match_mask, match_value,
4007                                                     items);
4008                         last_item = MLX5_FLOW_ITEM_METADATA;
4009                         break;
4010                 default:
4011                         break;
4012                 }
4013                 item_flags |= last_item;
4014         }
4015         /*
4016          * In case of ingress traffic when E-Switch mode is enabled,
4017          * we have two cases where we need to set the source port manually.
4018          * The first one, is in case of Nic steering rule, and the second is
4019          * E-Switch rule where no port_id item was found. In both cases
4020          * the source port is set according the current port in use.
4021          */
4022         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4023             (priv->representor || priv->master)) {
4024                 if (flow_dv_translate_item_port_id(dev, match_mask,
4025                                                    match_value, NULL))
4026                         return -rte_errno;
4027         }
4028         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4029                                          dev_flow->dv.value.buf));
4030         dev_flow->layers = item_flags;
4031         /* Register matcher. */
4032         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4033                                     matcher.mask.size);
4034         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4035                                                      matcher.priority);
4036         matcher.egress = attr->egress;
4037         matcher.group = attr->group;
4038         matcher.transfer = attr->transfer;
4039         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4040                 return -rte_errno;
4041         return 0;
4042 }
4043
/**
 * Apply the flow to the NIC.
 *
 * Walks every device sub-flow attached to @p flow, appends the fate
 * action (drop or queue/RSS hash Rx queue) to the already-translated
 * action list and creates the rule in hardware via the glue layer.
 * On any failure every hash Rx queue reference taken so far is
 * released before returning.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		/* Fate actions are appended after the translated ones. */
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (flow->transfer) {
				/* E-Switch rules drop through the shared
				 * eswitch drop action, no Rx queue involved.
				 */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			/* Reuse a matching hash Rx queue if one exists,
			 * otherwise create a new one.
			 */
			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/* Release every hash Rx queue reference taken above. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
4139
4140 /**
4141  * Release the flow matcher.
4142  *
4143  * @param dev
4144  *   Pointer to Ethernet device.
4145  * @param flow
4146  *   Pointer to mlx5_flow.
4147  *
4148  * @return
4149  *   1 while a reference on it exists, 0 when freed.
4150  */
4151 static int
4152 flow_dv_matcher_release(struct rte_eth_dev *dev,
4153                         struct mlx5_flow *flow)
4154 {
4155         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4156         struct mlx5_priv *priv = dev->data->dev_private;
4157         struct mlx5_ibv_shared *sh = priv->sh;
4158         struct mlx5_flow_tbl_resource *tbl;
4159
4160         assert(matcher->matcher_object);
4161         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4162                 dev->data->port_id, (void *)matcher,
4163                 rte_atomic32_read(&matcher->refcnt));
4164         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4165                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4166                            (matcher->matcher_object));
4167                 LIST_REMOVE(matcher, next);
4168                 if (matcher->egress)
4169                         tbl = &sh->tx_tbl[matcher->group];
4170                 else
4171                         tbl = &sh->rx_tbl[matcher->group];
4172                 flow_dv_tbl_resource_release(tbl);
4173                 rte_free(matcher);
4174                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4175                         dev->data->port_id, (void *)matcher);
4176                 return 0;
4177         }
4178         return 1;
4179 }
4180
4181 /**
4182  * Release an encap/decap resource.
4183  *
4184  * @param flow
4185  *   Pointer to mlx5_flow.
4186  *
4187  * @return
4188  *   1 while a reference on it exists, 0 when freed.
4189  */
4190 static int
4191 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4192 {
4193         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4194                                                 flow->dv.encap_decap;
4195
4196         assert(cache_resource->verbs_action);
4197         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4198                 (void *)cache_resource,
4199                 rte_atomic32_read(&cache_resource->refcnt));
4200         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4201                 claim_zero(mlx5_glue->destroy_flow_action
4202                                 (cache_resource->verbs_action));
4203                 LIST_REMOVE(cache_resource, next);
4204                 rte_free(cache_resource);
4205                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4206                         (void *)cache_resource);
4207                 return 0;
4208         }
4209         return 1;
4210 }
4211
4212 /**
4213  * Release an jump to table action resource.
4214  *
4215  * @param flow
4216  *   Pointer to mlx5_flow.
4217  *
4218  * @return
4219  *   1 while a reference on it exists, 0 when freed.
4220  */
4221 static int
4222 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4223 {
4224         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4225                                                 flow->dv.jump;
4226
4227         assert(cache_resource->action);
4228         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4229                 (void *)cache_resource,
4230                 rte_atomic32_read(&cache_resource->refcnt));
4231         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4232                 claim_zero(mlx5_glue->destroy_flow_action
4233                                 (cache_resource->action));
4234                 LIST_REMOVE(cache_resource, next);
4235                 flow_dv_tbl_resource_release(cache_resource->tbl);
4236                 rte_free(cache_resource);
4237                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4238                         (void *)cache_resource);
4239                 return 0;
4240         }
4241         return 1;
4242 }
4243
4244 /**
4245  * Release a modify-header resource.
4246  *
4247  * @param flow
4248  *   Pointer to mlx5_flow.
4249  *
4250  * @return
4251  *   1 while a reference on it exists, 0 when freed.
4252  */
4253 static int
4254 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4255 {
4256         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4257                                                 flow->dv.modify_hdr;
4258
4259         assert(cache_resource->verbs_action);
4260         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4261                 (void *)cache_resource,
4262                 rte_atomic32_read(&cache_resource->refcnt));
4263         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4264                 claim_zero(mlx5_glue->destroy_flow_action
4265                                 (cache_resource->verbs_action));
4266                 LIST_REMOVE(cache_resource, next);
4267                 rte_free(cache_resource);
4268                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4269                         (void *)cache_resource);
4270                 return 0;
4271         }
4272         return 1;
4273 }
4274
4275 /**
4276  * Release port ID action resource.
4277  *
4278  * @param flow
4279  *   Pointer to mlx5_flow.
4280  *
4281  * @return
4282  *   1 while a reference on it exists, 0 when freed.
4283  */
4284 static int
4285 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4286 {
4287         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4288                 flow->dv.port_id_action;
4289
4290         assert(cache_resource->action);
4291         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4292                 (void *)cache_resource,
4293                 rte_atomic32_read(&cache_resource->refcnt));
4294         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4295                 claim_zero(mlx5_glue->destroy_flow_action
4296                                 (cache_resource->action));
4297                 LIST_REMOVE(cache_resource, next);
4298                 rte_free(cache_resource);
4299                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4300                         (void *)cache_resource);
4301                 return 0;
4302         }
4303         return 1;
4304 }
4305
4306 /**
4307  * Remove the flow from the NIC but keeps it in memory.
4308  *
4309  * @param[in] dev
4310  *   Pointer to Ethernet device.
4311  * @param[in, out] flow
4312  *   Pointer to flow structure.
4313  */
4314 static void
4315 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4316 {
4317         struct mlx5_flow_dv *dv;
4318         struct mlx5_flow *dev_flow;
4319
4320         if (!flow)
4321                 return;
4322         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4323                 dv = &dev_flow->dv;
4324                 if (dv->flow) {
4325                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4326                         dv->flow = NULL;
4327                 }
4328                 if (dv->hrxq) {
4329                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4330                                 mlx5_hrxq_drop_release(dev);
4331                         else
4332                                 mlx5_hrxq_release(dev, dv->hrxq);
4333                         dv->hrxq = NULL;
4334                 }
4335         }
4336 }
4337
4338 /**
4339  * Remove the flow from the NIC and the memory.
4340  *
4341  * @param[in] dev
4342  *   Pointer to the Ethernet device structure.
4343  * @param[in, out] flow
4344  *   Pointer to flow structure.
4345  */
4346 static void
4347 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4348 {
4349         struct mlx5_flow *dev_flow;
4350
4351         if (!flow)
4352                 return;
4353         flow_dv_remove(dev, flow);
4354         if (flow->counter) {
4355                 flow_dv_counter_release(flow->counter);
4356                 flow->counter = NULL;
4357         }
4358         if (flow->tag_resource) {
4359                 flow_dv_tag_release(dev, flow->tag_resource);
4360                 flow->tag_resource = NULL;
4361         }
4362         while (!LIST_EMPTY(&flow->dev_flows)) {
4363                 dev_flow = LIST_FIRST(&flow->dev_flows);
4364                 LIST_REMOVE(dev_flow, next);
4365                 if (dev_flow->dv.matcher)
4366                         flow_dv_matcher_release(dev, dev_flow);
4367                 if (dev_flow->dv.encap_decap)
4368                         flow_dv_encap_decap_resource_release(dev_flow);
4369                 if (dev_flow->dv.modify_hdr)
4370                         flow_dv_modify_hdr_resource_release(dev_flow);
4371                 if (dev_flow->dv.jump)
4372                         flow_dv_jump_tbl_resource_release(dev_flow);
4373                 if (dev_flow->dv.port_id_action)
4374                         flow_dv_port_id_action_resource_release(dev_flow);
4375                 rte_free(dev_flow);
4376         }
4377 }
4378
4379 /**
4380  * Query a dv flow  rule for its statistics via devx.
4381  *
4382  * @param[in] dev
4383  *   Pointer to Ethernet device.
4384  * @param[in] flow
4385  *   Pointer to the sub flow.
4386  * @param[out] data
4387  *   data retrieved by the query.
4388  * @param[out] error
4389  *   Perform verbose error reporting if not NULL.
4390  *
4391  * @return
4392  *   0 on success, a negative errno value otherwise and rte_errno is set.
4393  */
4394 static int
4395 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4396                     void *data, struct rte_flow_error *error)
4397 {
4398         struct mlx5_priv *priv = dev->data->dev_private;
4399         struct rte_flow_query_count *qc = data;
4400         uint64_t pkts = 0;
4401         uint64_t bytes = 0;
4402         int err;
4403
4404         if (!priv->config.devx)
4405                 return rte_flow_error_set(error, ENOTSUP,
4406                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4407                                           NULL,
4408                                           "counters are not supported");
4409         if (flow->counter) {
4410                 err = mlx5_devx_cmd_flow_counter_query
4411                                                 (flow->counter->dcs,
4412                                                  qc->reset, &pkts, &bytes);
4413                 if (err)
4414                         return rte_flow_error_set
4415                                 (error, err,
4416                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4417                                  NULL,
4418                                  "cannot read counters");
4419                 qc->hits_set = 1;
4420                 qc->bytes_set = 1;
4421                 qc->hits = pkts - flow->counter->hits;
4422                 qc->bytes = bytes - flow->counter->bytes;
4423                 if (qc->reset) {
4424                         flow->counter->hits = pkts;
4425                         flow->counter->bytes = bytes;
4426                 }
4427                 return 0;
4428         }
4429         return rte_flow_error_set(error, EINVAL,
4430                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4431                                   NULL,
4432                                   "counters are not available");
4433 }
4434
4435 /**
4436  * Query a flow.
4437  *
4438  * @see rte_flow_query()
4439  * @see rte_flow_ops
4440  */
4441 static int
4442 flow_dv_query(struct rte_eth_dev *dev,
4443               struct rte_flow *flow __rte_unused,
4444               const struct rte_flow_action *actions __rte_unused,
4445               void *data __rte_unused,
4446               struct rte_flow_error *error __rte_unused)
4447 {
4448         int ret = -EINVAL;
4449
4450         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4451                 switch (actions->type) {
4452                 case RTE_FLOW_ACTION_TYPE_VOID:
4453                         break;
4454                 case RTE_FLOW_ACTION_TYPE_COUNT:
4455                         ret = flow_dv_query_count(dev, flow, data, error);
4456                         break;
4457                 default:
4458                         return rte_flow_error_set(error, ENOTSUP,
4459                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4460                                                   actions,
4461                                                   "action not supported");
4462                 }
4463         }
4464         return ret;
4465 }
4466
4467 /*
4468  * Mutex-protected thunk to flow_dv_translate().
4469  */
4470 static int
4471 flow_d_translate(struct rte_eth_dev *dev,
4472                  struct mlx5_flow *dev_flow,
4473                  const struct rte_flow_attr *attr,
4474                  const struct rte_flow_item items[],
4475                  const struct rte_flow_action actions[],
4476                  struct rte_flow_error *error)
4477 {
4478         int ret;
4479
4480         flow_d_shared_lock(dev);
4481         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4482         flow_d_shared_unlock(dev);
4483         return ret;
4484 }
4485
/*
 * Mutex-protected thunk to flow_dv_apply().
 * Serializes access to the shared IB context.
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int rc;

	flow_d_shared_lock(dev);
	rc = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return rc;
}
4501
/*
 * Mutex-protected thunk to flow_dv_remove().
 * Serializes access to the shared IB context.
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
4512
/*
 * Mutex-protected thunk to flow_dv_destroy().
 * Serializes access to the shared IB context.
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
4523
/*
 * Driver ops table for the DV (Direct Verbs) flow engine.
 * The translate/apply/remove/destroy entries go through the
 * flow_d_* thunks, which take the shared-context lock.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
4533
4534 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */