net/mlx5: match GRE key and present bits
[dpdk.git] / drivers / net / mlx5 / mlx5_flow_dv.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <sys/queue.h>
6 #include <stdalign.h>
7 #include <stdint.h>
8 #include <string.h>
9
10 /* Verbs header. */
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
12 #ifdef PEDANTIC
13 #pragma GCC diagnostic ignored "-Wpedantic"
14 #endif
15 #include <infiniband/verbs.h>
16 #ifdef PEDANTIC
17 #pragma GCC diagnostic error "-Wpedantic"
18 #endif
19
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_ethdev_driver.h>
23 #include <rte_flow.h>
24 #include <rte_flow_driver.h>
25 #include <rte_malloc.h>
26 #include <rte_ip.h>
27 #include <rte_gre.h>
28
29 #include "mlx5.h"
30 #include "mlx5_defs.h"
31 #include "mlx5_glue.h"
32 #include "mlx5_flow.h"
33 #include "mlx5_prm.h"
34 #include "mlx5_rxtx.h"
35
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
37
38 #ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
39 #define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
40 #endif
41
42 #ifndef HAVE_MLX5DV_DR_ESWITCH
43 #ifndef MLX5DV_FLOW_TABLE_TYPE_FDB
44 #define MLX5DV_FLOW_TABLE_TYPE_FDB 0
45 #endif
46 #endif
47
48 #ifndef HAVE_MLX5DV_DR
49 #define MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL 1
50 #endif
51
union flow_dv_attr {
	struct {
		uint32_t valid:1; /* Set once the structure has been filled in. */
		uint32_t ipv4:1; /* Pattern contains an IPv4 item. */
		uint32_t ipv6:1; /* Pattern contains an IPv6 item. */
		uint32_t tcp:1; /* Pattern contains a TCP item. */
		uint32_t udp:1; /* Pattern contains a UDP item. */
		uint32_t reserved:27;
	};
	uint32_t attr; /* Raw access to all flag bits at once. */
};
63
64 /**
65  * Initialize flow attributes structure according to flow items' types.
66  *
67  * @param[in] item
68  *   Pointer to item specification.
69  * @param[out] attr
70  *   Pointer to flow attributes structure.
71  */
72 static void
73 flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
74 {
75         for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
76                 switch (item->type) {
77                 case RTE_FLOW_ITEM_TYPE_IPV4:
78                         attr->ipv4 = 1;
79                         break;
80                 case RTE_FLOW_ITEM_TYPE_IPV6:
81                         attr->ipv6 = 1;
82                         break;
83                 case RTE_FLOW_ITEM_TYPE_UDP:
84                         attr->udp = 1;
85                         break;
86                 case RTE_FLOW_ITEM_TYPE_TCP:
87                         attr->tcp = 1;
88                         break;
89                 default:
90                         break;
91                 }
92         }
93         attr->valid = 1;
94 }
95
struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id; /* Hardware modification field ID. */
};
101
/* Ethernet header rewrite map: each 48-bit MAC address is split into a
 * 4-byte high part and a 2-byte low part to match the modification
 * field granularity. Terminated by a zero-sized entry.
 */
struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};
109
/* IPv4 header rewrite map: TTL plus source/destination addresses.
 * Offsets are byte positions within the IPv4 header.
 */
struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};
116
/* IPv6 header rewrite map: hop limit plus source/destination addresses,
 * each 128-bit address split into four 32-bit modification fields.
 */
struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};
129
/* UDP header rewrite map: source and destination ports. */
struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};
135
/* TCP header rewrite map: ports plus sequence/acknowledgment numbers. */
struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{4, 4, MLX5_MODI_OUT_TCP_SEQ_NUM},
	{4, 8, MLX5_MODI_OUT_TCP_ACK_NUM},
	{0, 0, 0},
};
143
144 /**
145  * Acquire the synchronizing object to protect multithreaded access
146  * to shared dv context. Lock occurs only if context is actually
147  * shared, i.e. we have multiport IB device and representors are
148  * created.
149  *
150  * @param[in] dev
151  *   Pointer to the rte_eth_dev structure.
152  */
153 static void
154 flow_d_shared_lock(struct rte_eth_dev *dev)
155 {
156         struct mlx5_priv *priv = dev->data->dev_private;
157         struct mlx5_ibv_shared *sh = priv->sh;
158
159         if (sh->dv_refcnt > 1) {
160                 int ret;
161
162                 ret = pthread_mutex_lock(&sh->dv_mutex);
163                 assert(!ret);
164                 (void)ret;
165         }
166 }
167
168 static void
169 flow_d_shared_unlock(struct rte_eth_dev *dev)
170 {
171         struct mlx5_priv *priv = dev->data->dev_private;
172         struct mlx5_ibv_shared *sh = priv->sh;
173
174         if (sh->dv_refcnt > 1) {
175                 int ret;
176
177                 ret = pthread_mutex_unlock(&sh->dv_mutex);
178                 assert(!ret);
179                 (void)ret;
180         }
181 }
182
183 /**
184  * Convert modify-header action to DV specification.
185  *
186  * @param[in] item
187  *   Pointer to item specification.
188  * @param[in] field
189  *   Pointer to field modification information.
190  * @param[in,out] resource
191  *   Pointer to the modify-header resource.
192  * @param[in] type
193  *   Type of modification.
194  * @param[out] error
195  *   Pointer to the error structure.
196  *
197  * @return
198  *   0 on success, a negative errno value otherwise and rte_errno is set.
199  */
200 static int
201 flow_dv_convert_modify_action(struct rte_flow_item *item,
202                               struct field_modify_info *field,
203                               struct mlx5_flow_dv_modify_hdr_resource *resource,
204                               uint32_t type,
205                               struct rte_flow_error *error)
206 {
207         uint32_t i = resource->actions_num;
208         struct mlx5_modification_cmd *actions = resource->actions;
209         const uint8_t *spec = item->spec;
210         const uint8_t *mask = item->mask;
211         uint32_t set;
212
213         while (field->size) {
214                 set = 0;
215                 /* Generate modify command for each mask segment. */
216                 memcpy(&set, &mask[field->offset], field->size);
217                 if (set) {
218                         if (i >= MLX5_MODIFY_NUM)
219                                 return rte_flow_error_set(error, EINVAL,
220                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
221                                          "too many items to modify");
222                         actions[i].action_type = type;
223                         actions[i].field = field->id;
224                         actions[i].length = field->size ==
225                                         4 ? 0 : field->size * 8;
226                         rte_memcpy(&actions[i].data[4 - field->size],
227                                    &spec[field->offset], field->size);
228                         actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
229                         ++i;
230                 }
231                 if (resource->actions_num != i)
232                         resource->actions_num = i;
233                 field++;
234         }
235         if (!resource->actions_num)
236                 return rte_flow_error_set(error, EINVAL,
237                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
238                                           "invalid modification flow item");
239         return 0;
240 }
241
242 /**
243  * Convert modify-header set IPv4 address action to DV specification.
244  *
245  * @param[in,out] resource
246  *   Pointer to the modify-header resource.
247  * @param[in] action
248  *   Pointer to action specification.
249  * @param[out] error
250  *   Pointer to the error structure.
251  *
252  * @return
253  *   0 on success, a negative errno value otherwise and rte_errno is set.
254  */
255 static int
256 flow_dv_convert_action_modify_ipv4
257                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
258                          const struct rte_flow_action *action,
259                          struct rte_flow_error *error)
260 {
261         const struct rte_flow_action_set_ipv4 *conf =
262                 (const struct rte_flow_action_set_ipv4 *)(action->conf);
263         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
264         struct rte_flow_item_ipv4 ipv4;
265         struct rte_flow_item_ipv4 ipv4_mask;
266
267         memset(&ipv4, 0, sizeof(ipv4));
268         memset(&ipv4_mask, 0, sizeof(ipv4_mask));
269         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
270                 ipv4.hdr.src_addr = conf->ipv4_addr;
271                 ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
272         } else {
273                 ipv4.hdr.dst_addr = conf->ipv4_addr;
274                 ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
275         }
276         item.spec = &ipv4;
277         item.mask = &ipv4_mask;
278         return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
279                                              MLX5_MODIFICATION_TYPE_SET, error);
280 }
281
282 /**
283  * Convert modify-header set IPv6 address action to DV specification.
284  *
285  * @param[in,out] resource
286  *   Pointer to the modify-header resource.
287  * @param[in] action
288  *   Pointer to action specification.
289  * @param[out] error
290  *   Pointer to the error structure.
291  *
292  * @return
293  *   0 on success, a negative errno value otherwise and rte_errno is set.
294  */
295 static int
296 flow_dv_convert_action_modify_ipv6
297                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
298                          const struct rte_flow_action *action,
299                          struct rte_flow_error *error)
300 {
301         const struct rte_flow_action_set_ipv6 *conf =
302                 (const struct rte_flow_action_set_ipv6 *)(action->conf);
303         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
304         struct rte_flow_item_ipv6 ipv6;
305         struct rte_flow_item_ipv6 ipv6_mask;
306
307         memset(&ipv6, 0, sizeof(ipv6));
308         memset(&ipv6_mask, 0, sizeof(ipv6_mask));
309         if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
310                 memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
311                        sizeof(ipv6.hdr.src_addr));
312                 memcpy(&ipv6_mask.hdr.src_addr,
313                        &rte_flow_item_ipv6_mask.hdr.src_addr,
314                        sizeof(ipv6.hdr.src_addr));
315         } else {
316                 memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
317                        sizeof(ipv6.hdr.dst_addr));
318                 memcpy(&ipv6_mask.hdr.dst_addr,
319                        &rte_flow_item_ipv6_mask.hdr.dst_addr,
320                        sizeof(ipv6.hdr.dst_addr));
321         }
322         item.spec = &ipv6;
323         item.mask = &ipv6_mask;
324         return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
325                                              MLX5_MODIFICATION_TYPE_SET, error);
326 }
327
328 /**
329  * Convert modify-header set MAC address action to DV specification.
330  *
331  * @param[in,out] resource
332  *   Pointer to the modify-header resource.
333  * @param[in] action
334  *   Pointer to action specification.
335  * @param[out] error
336  *   Pointer to the error structure.
337  *
338  * @return
339  *   0 on success, a negative errno value otherwise and rte_errno is set.
340  */
341 static int
342 flow_dv_convert_action_modify_mac
343                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
344                          const struct rte_flow_action *action,
345                          struct rte_flow_error *error)
346 {
347         const struct rte_flow_action_set_mac *conf =
348                 (const struct rte_flow_action_set_mac *)(action->conf);
349         struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
350         struct rte_flow_item_eth eth;
351         struct rte_flow_item_eth eth_mask;
352
353         memset(&eth, 0, sizeof(eth));
354         memset(&eth_mask, 0, sizeof(eth_mask));
355         if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
356                 memcpy(&eth.src.addr_bytes, &conf->mac_addr,
357                        sizeof(eth.src.addr_bytes));
358                 memcpy(&eth_mask.src.addr_bytes,
359                        &rte_flow_item_eth_mask.src.addr_bytes,
360                        sizeof(eth_mask.src.addr_bytes));
361         } else {
362                 memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
363                        sizeof(eth.dst.addr_bytes));
364                 memcpy(&eth_mask.dst.addr_bytes,
365                        &rte_flow_item_eth_mask.dst.addr_bytes,
366                        sizeof(eth_mask.dst.addr_bytes));
367         }
368         item.spec = &eth;
369         item.mask = &eth_mask;
370         return flow_dv_convert_modify_action(&item, modify_eth, resource,
371                                              MLX5_MODIFICATION_TYPE_SET, error);
372 }
373
374 /**
375  * Convert modify-header set TP action to DV specification.
376  *
377  * @param[in,out] resource
378  *   Pointer to the modify-header resource.
379  * @param[in] action
380  *   Pointer to action specification.
381  * @param[in] items
382  *   Pointer to rte_flow_item objects list.
383  * @param[in] attr
384  *   Pointer to flow attributes structure.
385  * @param[out] error
386  *   Pointer to the error structure.
387  *
388  * @return
389  *   0 on success, a negative errno value otherwise and rte_errno is set.
390  */
391 static int
392 flow_dv_convert_action_modify_tp
393                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
394                          const struct rte_flow_action *action,
395                          const struct rte_flow_item *items,
396                          union flow_dv_attr *attr,
397                          struct rte_flow_error *error)
398 {
399         const struct rte_flow_action_set_tp *conf =
400                 (const struct rte_flow_action_set_tp *)(action->conf);
401         struct rte_flow_item item;
402         struct rte_flow_item_udp udp;
403         struct rte_flow_item_udp udp_mask;
404         struct rte_flow_item_tcp tcp;
405         struct rte_flow_item_tcp tcp_mask;
406         struct field_modify_info *field;
407
408         if (!attr->valid)
409                 flow_dv_attr_init(items, attr);
410         if (attr->udp) {
411                 memset(&udp, 0, sizeof(udp));
412                 memset(&udp_mask, 0, sizeof(udp_mask));
413                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
414                         udp.hdr.src_port = conf->port;
415                         udp_mask.hdr.src_port =
416                                         rte_flow_item_udp_mask.hdr.src_port;
417                 } else {
418                         udp.hdr.dst_port = conf->port;
419                         udp_mask.hdr.dst_port =
420                                         rte_flow_item_udp_mask.hdr.dst_port;
421                 }
422                 item.type = RTE_FLOW_ITEM_TYPE_UDP;
423                 item.spec = &udp;
424                 item.mask = &udp_mask;
425                 field = modify_udp;
426         }
427         if (attr->tcp) {
428                 memset(&tcp, 0, sizeof(tcp));
429                 memset(&tcp_mask, 0, sizeof(tcp_mask));
430                 if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
431                         tcp.hdr.src_port = conf->port;
432                         tcp_mask.hdr.src_port =
433                                         rte_flow_item_tcp_mask.hdr.src_port;
434                 } else {
435                         tcp.hdr.dst_port = conf->port;
436                         tcp_mask.hdr.dst_port =
437                                         rte_flow_item_tcp_mask.hdr.dst_port;
438                 }
439                 item.type = RTE_FLOW_ITEM_TYPE_TCP;
440                 item.spec = &tcp;
441                 item.mask = &tcp_mask;
442                 field = modify_tcp;
443         }
444         return flow_dv_convert_modify_action(&item, field, resource,
445                                              MLX5_MODIFICATION_TYPE_SET, error);
446 }
447
448 /**
449  * Convert modify-header set TTL action to DV specification.
450  *
451  * @param[in,out] resource
452  *   Pointer to the modify-header resource.
453  * @param[in] action
454  *   Pointer to action specification.
455  * @param[in] items
456  *   Pointer to rte_flow_item objects list.
457  * @param[in] attr
458  *   Pointer to flow attributes structure.
459  * @param[out] error
460  *   Pointer to the error structure.
461  *
462  * @return
463  *   0 on success, a negative errno value otherwise and rte_errno is set.
464  */
465 static int
466 flow_dv_convert_action_modify_ttl
467                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
468                          const struct rte_flow_action *action,
469                          const struct rte_flow_item *items,
470                          union flow_dv_attr *attr,
471                          struct rte_flow_error *error)
472 {
473         const struct rte_flow_action_set_ttl *conf =
474                 (const struct rte_flow_action_set_ttl *)(action->conf);
475         struct rte_flow_item item;
476         struct rte_flow_item_ipv4 ipv4;
477         struct rte_flow_item_ipv4 ipv4_mask;
478         struct rte_flow_item_ipv6 ipv6;
479         struct rte_flow_item_ipv6 ipv6_mask;
480         struct field_modify_info *field;
481
482         if (!attr->valid)
483                 flow_dv_attr_init(items, attr);
484         if (attr->ipv4) {
485                 memset(&ipv4, 0, sizeof(ipv4));
486                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
487                 ipv4.hdr.time_to_live = conf->ttl_value;
488                 ipv4_mask.hdr.time_to_live = 0xFF;
489                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
490                 item.spec = &ipv4;
491                 item.mask = &ipv4_mask;
492                 field = modify_ipv4;
493         }
494         if (attr->ipv6) {
495                 memset(&ipv6, 0, sizeof(ipv6));
496                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
497                 ipv6.hdr.hop_limits = conf->ttl_value;
498                 ipv6_mask.hdr.hop_limits = 0xFF;
499                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
500                 item.spec = &ipv6;
501                 item.mask = &ipv6_mask;
502                 field = modify_ipv6;
503         }
504         return flow_dv_convert_modify_action(&item, field, resource,
505                                              MLX5_MODIFICATION_TYPE_SET, error);
506 }
507
508 /**
509  * Convert modify-header decrement TTL action to DV specification.
510  *
511  * @param[in,out] resource
512  *   Pointer to the modify-header resource.
513  * @param[in] action
514  *   Pointer to action specification.
515  * @param[in] items
516  *   Pointer to rte_flow_item objects list.
517  * @param[in] attr
518  *   Pointer to flow attributes structure.
519  * @param[out] error
520  *   Pointer to the error structure.
521  *
522  * @return
523  *   0 on success, a negative errno value otherwise and rte_errno is set.
524  */
525 static int
526 flow_dv_convert_action_modify_dec_ttl
527                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
528                          const struct rte_flow_item *items,
529                          union flow_dv_attr *attr,
530                          struct rte_flow_error *error)
531 {
532         struct rte_flow_item item;
533         struct rte_flow_item_ipv4 ipv4;
534         struct rte_flow_item_ipv4 ipv4_mask;
535         struct rte_flow_item_ipv6 ipv6;
536         struct rte_flow_item_ipv6 ipv6_mask;
537         struct field_modify_info *field;
538
539         if (!attr->valid)
540                 flow_dv_attr_init(items, attr);
541         if (attr->ipv4) {
542                 memset(&ipv4, 0, sizeof(ipv4));
543                 memset(&ipv4_mask, 0, sizeof(ipv4_mask));
544                 ipv4.hdr.time_to_live = 0xFF;
545                 ipv4_mask.hdr.time_to_live = 0xFF;
546                 item.type = RTE_FLOW_ITEM_TYPE_IPV4;
547                 item.spec = &ipv4;
548                 item.mask = &ipv4_mask;
549                 field = modify_ipv4;
550         }
551         if (attr->ipv6) {
552                 memset(&ipv6, 0, sizeof(ipv6));
553                 memset(&ipv6_mask, 0, sizeof(ipv6_mask));
554                 ipv6.hdr.hop_limits = 0xFF;
555                 ipv6_mask.hdr.hop_limits = 0xFF;
556                 item.type = RTE_FLOW_ITEM_TYPE_IPV6;
557                 item.spec = &ipv6;
558                 item.mask = &ipv6_mask;
559                 field = modify_ipv6;
560         }
561         return flow_dv_convert_modify_action(&item, field, resource,
562                                              MLX5_MODIFICATION_TYPE_ADD, error);
563 }
564
565 /**
566  * Convert modify-header increment/decrement TCP Sequence number
567  * to DV specification.
568  *
569  * @param[in,out] resource
570  *   Pointer to the modify-header resource.
571  * @param[in] action
572  *   Pointer to action specification.
573  * @param[out] error
574  *   Pointer to the error structure.
575  *
576  * @return
577  *   0 on success, a negative errno value otherwise and rte_errno is set.
578  */
579 static int
580 flow_dv_convert_action_modify_tcp_seq
581                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
582                          const struct rte_flow_action *action,
583                          struct rte_flow_error *error)
584 {
585         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
586         uint64_t value = rte_be_to_cpu_32(*conf);
587         struct rte_flow_item item;
588         struct rte_flow_item_tcp tcp;
589         struct rte_flow_item_tcp tcp_mask;
590
591         memset(&tcp, 0, sizeof(tcp));
592         memset(&tcp_mask, 0, sizeof(tcp_mask));
593         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ)
594                 /*
595                  * The HW has no decrement operation, only increment operation.
596                  * To simulate decrement X from Y using increment operation
597                  * we need to add UINT32_MAX X times to Y.
598                  * Each adding of UINT32_MAX decrements Y by 1.
599                  */
600                 value *= UINT32_MAX;
601         tcp.hdr.sent_seq = rte_cpu_to_be_32((uint32_t)value);
602         tcp_mask.hdr.sent_seq = RTE_BE32(UINT32_MAX);
603         item.type = RTE_FLOW_ITEM_TYPE_TCP;
604         item.spec = &tcp;
605         item.mask = &tcp_mask;
606         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
607                                              MLX5_MODIFICATION_TYPE_ADD, error);
608 }
609
610 /**
611  * Convert modify-header increment/decrement TCP Acknowledgment number
612  * to DV specification.
613  *
614  * @param[in,out] resource
615  *   Pointer to the modify-header resource.
616  * @param[in] action
617  *   Pointer to action specification.
618  * @param[out] error
619  *   Pointer to the error structure.
620  *
621  * @return
622  *   0 on success, a negative errno value otherwise and rte_errno is set.
623  */
624 static int
625 flow_dv_convert_action_modify_tcp_ack
626                         (struct mlx5_flow_dv_modify_hdr_resource *resource,
627                          const struct rte_flow_action *action,
628                          struct rte_flow_error *error)
629 {
630         const rte_be32_t *conf = (const rte_be32_t *)(action->conf);
631         uint64_t value = rte_be_to_cpu_32(*conf);
632         struct rte_flow_item item;
633         struct rte_flow_item_tcp tcp;
634         struct rte_flow_item_tcp tcp_mask;
635
636         memset(&tcp, 0, sizeof(tcp));
637         memset(&tcp_mask, 0, sizeof(tcp_mask));
638         if (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK)
639                 /*
640                  * The HW has no decrement operation, only increment operation.
641                  * To simulate decrement X from Y using increment operation
642                  * we need to add UINT32_MAX X times to Y.
643                  * Each adding of UINT32_MAX decrements Y by 1.
644                  */
645                 value *= UINT32_MAX;
646         tcp.hdr.recv_ack = rte_cpu_to_be_32((uint32_t)value);
647         tcp_mask.hdr.recv_ack = RTE_BE32(UINT32_MAX);
648         item.type = RTE_FLOW_ITEM_TYPE_TCP;
649         item.spec = &tcp;
650         item.mask = &tcp_mask;
651         return flow_dv_convert_modify_action(&item, modify_tcp, resource,
652                                              MLX5_MODIFICATION_TYPE_ADD, error);
653 }
654
655 /**
656  * Validate META item.
657  *
658  * @param[in] dev
659  *   Pointer to the rte_eth_dev structure.
660  * @param[in] item
661  *   Item specification.
662  * @param[in] attr
663  *   Attributes of flow that includes this item.
664  * @param[out] error
665  *   Pointer to error structure.
666  *
667  * @return
668  *   0 on success, a negative errno value otherwise and rte_errno is set.
669  */
670 static int
671 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
672                            const struct rte_flow_item *item,
673                            const struct rte_flow_attr *attr,
674                            struct rte_flow_error *error)
675 {
676         const struct rte_flow_item_meta *spec = item->spec;
677         const struct rte_flow_item_meta *mask = item->mask;
678         const struct rte_flow_item_meta nic_mask = {
679                 .data = RTE_BE32(UINT32_MAX)
680         };
681         int ret;
682         uint64_t offloads = dev->data->dev_conf.txmode.offloads;
683
684         if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
685                 return rte_flow_error_set(error, EPERM,
686                                           RTE_FLOW_ERROR_TYPE_ITEM,
687                                           NULL,
688                                           "match on metadata offload "
689                                           "configuration is off for this port");
690         if (!spec)
691                 return rte_flow_error_set(error, EINVAL,
692                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
693                                           item->spec,
694                                           "data cannot be empty");
695         if (!spec->data)
696                 return rte_flow_error_set(error, EINVAL,
697                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
698                                           NULL,
699                                           "data cannot be zero");
700         if (!mask)
701                 mask = &rte_flow_item_meta_mask;
702         ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
703                                         (const uint8_t *)&nic_mask,
704                                         sizeof(struct rte_flow_item_meta),
705                                         error);
706         if (ret < 0)
707                 return ret;
708         if (attr->ingress)
709                 return rte_flow_error_set(error, ENOTSUP,
710                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
711                                           NULL,
712                                           "pattern not supported for ingress");
713         return 0;
714 }
715
716 /**
717  * Validate vport item.
718  *
719  * @param[in] dev
720  *   Pointer to the rte_eth_dev structure.
721  * @param[in] item
722  *   Item specification.
723  * @param[in] attr
724  *   Attributes of flow that includes this item.
725  * @param[in] item_flags
726  *   Bit-fields that holds the items detected until now.
727  * @param[out] error
728  *   Pointer to error structure.
729  *
730  * @return
731  *   0 on success, a negative errno value otherwise and rte_errno is set.
732  */
733 static int
734 flow_dv_validate_item_port_id(struct rte_eth_dev *dev,
735                               const struct rte_flow_item *item,
736                               const struct rte_flow_attr *attr,
737                               uint64_t item_flags,
738                               struct rte_flow_error *error)
739 {
740         const struct rte_flow_item_port_id *spec = item->spec;
741         const struct rte_flow_item_port_id *mask = item->mask;
742         const struct rte_flow_item_port_id switch_mask = {
743                         .id = 0xffffffff,
744         };
745         uint16_t esw_domain_id;
746         uint16_t item_port_esw_domain_id;
747         int ret;
748
749         if (!attr->transfer)
750                 return rte_flow_error_set(error, EINVAL,
751                                           RTE_FLOW_ERROR_TYPE_ITEM,
752                                           NULL,
753                                           "match on port id is valid only"
754                                           " when transfer flag is enabled");
755         if (item_flags & MLX5_FLOW_ITEM_PORT_ID)
756                 return rte_flow_error_set(error, ENOTSUP,
757                                           RTE_FLOW_ERROR_TYPE_ITEM, item,
758                                           "multiple source ports are not"
759                                           " supported");
760         if (!mask)
761                 mask = &switch_mask;
762         if (mask->id != 0xffffffff)
763                 return rte_flow_error_set(error, ENOTSUP,
764                                            RTE_FLOW_ERROR_TYPE_ITEM_MASK,
765                                            mask,
766                                            "no support for partial mask on"
767                                            " \"id\" field");
768         ret = mlx5_flow_item_acceptable
769                                 (item, (const uint8_t *)mask,
770                                  (const uint8_t *)&rte_flow_item_port_id_mask,
771                                  sizeof(struct rte_flow_item_port_id),
772                                  error);
773         if (ret)
774                 return ret;
775         if (!spec)
776                 return 0;
777         ret = mlx5_port_to_eswitch_info(spec->id, &item_port_esw_domain_id,
778                                         NULL);
779         if (ret)
780                 return rte_flow_error_set(error, -ret,
781                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
782                                           "failed to obtain E-Switch info for"
783                                           " port");
784         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
785                                         &esw_domain_id, NULL);
786         if (ret < 0)
787                 return rte_flow_error_set(error, -ret,
788                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
789                                           NULL,
790                                           "failed to obtain E-Switch info");
791         if (item_port_esw_domain_id != esw_domain_id)
792                 return rte_flow_error_set(error, -ret,
793                                           RTE_FLOW_ERROR_TYPE_ITEM_SPEC, spec,
794                                           "cannot match on a port from a"
795                                           " different E-Switch");
796         return 0;
797 }
798
799 /**
800  * Validate count action.
801  *
802  * @param[in] dev
 *   Pointer to the Ethernet device structure.
804  * @param[out] error
805  *   Pointer to error structure.
806  *
807  * @return
808  *   0 on success, a negative errno value otherwise and rte_errno is set.
809  */
810 static int
811 flow_dv_validate_action_count(struct rte_eth_dev *dev,
812                               struct rte_flow_error *error)
813 {
814         struct mlx5_priv *priv = dev->data->dev_private;
815
816         if (!priv->config.devx)
817                 goto notsup_err;
818 #ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
819         return 0;
820 #endif
821 notsup_err:
822         return rte_flow_error_set
823                       (error, ENOTSUP,
824                        RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
825                        NULL,
826                        "count action not supported");
827 }
828
829 /**
830  * Validate the L2 encap action.
831  *
832  * @param[in] action_flags
833  *   Holds the actions detected until now.
834  * @param[in] action
835  *   Pointer to the encap action.
836  * @param[in] attr
837  *   Pointer to flow attributes
838  * @param[out] error
839  *   Pointer to error structure.
840  *
841  * @return
842  *   0 on success, a negative errno value otherwise and rte_errno is set.
843  */
844 static int
845 flow_dv_validate_action_l2_encap(uint64_t action_flags,
846                                  const struct rte_flow_action *action,
847                                  const struct rte_flow_attr *attr,
848                                  struct rte_flow_error *error)
849 {
850         if (!(action->conf))
851                 return rte_flow_error_set(error, EINVAL,
852                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
853                                           "configuration cannot be null");
854         if (action_flags & MLX5_FLOW_ACTION_DROP)
855                 return rte_flow_error_set(error, EINVAL,
856                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
857                                           "can't drop and encap in same flow");
858         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
859                 return rte_flow_error_set(error, EINVAL,
860                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
861                                           "can only have a single encap or"
862                                           " decap action in a flow");
863         if (!attr->transfer && attr->ingress)
864                 return rte_flow_error_set(error, ENOTSUP,
865                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
866                                           NULL,
867                                           "encap action not supported for "
868                                           "ingress");
869         return 0;
870 }
871
872 /**
873  * Validate the L2 decap action.
874  *
875  * @param[in] action_flags
876  *   Holds the actions detected until now.
877  * @param[in] attr
878  *   Pointer to flow attributes
879  * @param[out] error
880  *   Pointer to error structure.
881  *
882  * @return
883  *   0 on success, a negative errno value otherwise and rte_errno is set.
884  */
885 static int
886 flow_dv_validate_action_l2_decap(uint64_t action_flags,
887                                  const struct rte_flow_attr *attr,
888                                  struct rte_flow_error *error)
889 {
890         if (action_flags & MLX5_FLOW_ACTION_DROP)
891                 return rte_flow_error_set(error, EINVAL,
892                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
893                                           "can't drop and decap in same flow");
894         if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
895                 return rte_flow_error_set(error, EINVAL,
896                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
897                                           "can only have a single encap or"
898                                           " decap action in a flow");
899         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
900                 return rte_flow_error_set(error, EINVAL,
901                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
902                                           "can't have decap action after"
903                                           " modify action");
904         if (attr->egress)
905                 return rte_flow_error_set(error, ENOTSUP,
906                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
907                                           NULL,
908                                           "decap action not supported for "
909                                           "egress");
910         return 0;
911 }
912
913 /**
914  * Validate the raw encap action.
915  *
916  * @param[in] action_flags
917  *   Holds the actions detected until now.
918  * @param[in] action
919  *   Pointer to the encap action.
920  * @param[in] attr
921  *   Pointer to flow attributes
922  * @param[out] error
923  *   Pointer to error structure.
924  *
925  * @return
926  *   0 on success, a negative errno value otherwise and rte_errno is set.
927  */
928 static int
929 flow_dv_validate_action_raw_encap(uint64_t action_flags,
930                                   const struct rte_flow_action *action,
931                                   const struct rte_flow_attr *attr,
932                                   struct rte_flow_error *error)
933 {
934         if (!(action->conf))
935                 return rte_flow_error_set(error, EINVAL,
936                                           RTE_FLOW_ERROR_TYPE_ACTION, action,
937                                           "configuration cannot be null");
938         if (action_flags & MLX5_FLOW_ACTION_DROP)
939                 return rte_flow_error_set(error, EINVAL,
940                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
941                                           "can't drop and encap in same flow");
942         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
943                 return rte_flow_error_set(error, EINVAL,
944                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
945                                           "can only have a single encap"
946                                           " action in a flow");
947         /* encap without preceding decap is not supported for ingress */
948         if (!attr->transfer &&  attr->ingress &&
949             !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
950                 return rte_flow_error_set(error, ENOTSUP,
951                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
952                                           NULL,
953                                           "encap action not supported for "
954                                           "ingress");
955         return 0;
956 }
957
958 /**
959  * Validate the raw decap action.
960  *
961  * @param[in] action_flags
962  *   Holds the actions detected until now.
963  * @param[in] action
964  *   Pointer to the encap action.
965  * @param[in] attr
966  *   Pointer to flow attributes
967  * @param[out] error
968  *   Pointer to error structure.
969  *
970  * @return
971  *   0 on success, a negative errno value otherwise and rte_errno is set.
972  */
973 static int
974 flow_dv_validate_action_raw_decap(uint64_t action_flags,
975                                   const struct rte_flow_action *action,
976                                   const struct rte_flow_attr *attr,
977                                   struct rte_flow_error *error)
978 {
979         if (action_flags & MLX5_FLOW_ACTION_DROP)
980                 return rte_flow_error_set(error, EINVAL,
981                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
982                                           "can't drop and decap in same flow");
983         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
984                 return rte_flow_error_set(error, EINVAL,
985                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
986                                           "can't have encap action before"
987                                           " decap action");
988         if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
989                 return rte_flow_error_set(error, EINVAL,
990                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
991                                           "can only have a single decap"
992                                           " action in a flow");
993         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
994                 return rte_flow_error_set(error, EINVAL,
995                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
996                                           "can't have decap action after"
997                                           " modify action");
998         /* decap action is valid on egress only if it is followed by encap */
999         if (attr->egress) {
1000                 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
1001                        action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
1002                        action++) {
1003                 }
1004                 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
1005                         return rte_flow_error_set
1006                                         (error, ENOTSUP,
1007                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
1008                                          NULL, "decap action not supported"
1009                                          " for egress");
1010         }
1011         return 0;
1012 }
1013
1014 /**
1015  * Find existing encap/decap resource or create and register a new one.
1016  *
1017  * @param dev[in, out]
1018  *   Pointer to rte_eth_dev structure.
1019  * @param[in, out] resource
1020  *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
1022  *   Pointer to the dev_flow.
1023  * @param[out] error
1024  *   pointer to error structure.
1025  *
1026  * @return
1027  *   0 on success otherwise -errno and errno is set.
1028  */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_domain *domain;

	/*
	 * Group 0 flows live in the root table: flag the resource with 1
	 * (matches MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL defined above).
	 */
	resource->flags = flow->group ? 0 : 1;
	/* Pick the DR domain matching the target flow table type. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		domain = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_RX)
		domain = sh->rx_domain;
	else
		domain = sh->tx_domain;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		/*
		 * Two resources match only when type, table, flags, and the
		 * full reformat buffer contents are identical.
		 */
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Reuse the cached entry; just bump the refcount. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	/* Create the HW reformat action; size 0 means plain L2 decap. */
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, domain, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		/* Do not leak the cache entry on action creation failure. */
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts with refcount 1 and joins the shared cache list. */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
1095
1096 /**
1097  * Find existing table jump resource or create and register a new one.
1098  *
1099  * @param dev[in, out]
1100  *   Pointer to rte_eth_dev structure.
1101  * @param[in, out] resource
1102  *   Pointer to jump table resource.
 * @param[in, out] dev_flow
1104  *   Pointer to the dev_flow.
1105  * @param[out] error
1106  *   pointer to error structure.
1107  *
1108  * @return
1109  *   0 on success otherwise -errno and errno is set.
1110  */
1111 static int
1112 flow_dv_jump_tbl_resource_register
1113                         (struct rte_eth_dev *dev,
1114                          struct mlx5_flow_dv_jump_tbl_resource *resource,
1115                          struct mlx5_flow *dev_flow,
1116                          struct rte_flow_error *error)
1117 {
1118         struct mlx5_priv *priv = dev->data->dev_private;
1119         struct mlx5_ibv_shared *sh = priv->sh;
1120         struct mlx5_flow_dv_jump_tbl_resource *cache_resource;
1121
1122         /* Lookup a matching resource from cache. */
1123         LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
1124                 if (resource->tbl == cache_resource->tbl) {
1125                         DRV_LOG(DEBUG, "jump table resource resource %p: refcnt %d++",
1126                                 (void *)cache_resource,
1127                                 rte_atomic32_read(&cache_resource->refcnt));
1128                         rte_atomic32_inc(&cache_resource->refcnt);
1129                         dev_flow->dv.jump = cache_resource;
1130                         return 0;
1131                 }
1132         }
1133         /* Register new jump table resource. */
1134         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1135         if (!cache_resource)
1136                 return rte_flow_error_set(error, ENOMEM,
1137                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1138                                           "cannot allocate resource memory");
1139         *cache_resource = *resource;
1140         cache_resource->action =
1141                 mlx5_glue->dr_create_flow_action_dest_flow_tbl
1142                 (resource->tbl->obj);
1143         if (!cache_resource->action) {
1144                 rte_free(cache_resource);
1145                 return rte_flow_error_set(error, ENOMEM,
1146                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1147                                           NULL, "cannot create action");
1148         }
1149         rte_atomic32_init(&cache_resource->refcnt);
1150         rte_atomic32_inc(&cache_resource->refcnt);
1151         LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
1152         dev_flow->dv.jump = cache_resource;
1153         DRV_LOG(DEBUG, "new jump table  resource %p: refcnt %d++",
1154                 (void *)cache_resource,
1155                 rte_atomic32_read(&cache_resource->refcnt));
1156         return 0;
1157 }
1158
1159 /**
1160  * Find existing table port ID resource or create and register a new one.
1161  *
1162  * @param dev[in, out]
1163  *   Pointer to rte_eth_dev structure.
1164  * @param[in, out] resource
1165  *   Pointer to port ID action resource.
 * @param[in, out] dev_flow
1167  *   Pointer to the dev_flow.
1168  * @param[out] error
1169  *   pointer to error structure.
1170  *
1171  * @return
1172  *   0 on success otherwise -errno and errno is set.
1173  */
1174 static int
1175 flow_dv_port_id_action_resource_register
1176                         (struct rte_eth_dev *dev,
1177                          struct mlx5_flow_dv_port_id_action_resource *resource,
1178                          struct mlx5_flow *dev_flow,
1179                          struct rte_flow_error *error)
1180 {
1181         struct mlx5_priv *priv = dev->data->dev_private;
1182         struct mlx5_ibv_shared *sh = priv->sh;
1183         struct mlx5_flow_dv_port_id_action_resource *cache_resource;
1184
1185         /* Lookup a matching resource from cache. */
1186         LIST_FOREACH(cache_resource, &sh->port_id_action_list, next) {
1187                 if (resource->port_id == cache_resource->port_id) {
1188                         DRV_LOG(DEBUG, "port id action resource resource %p: "
1189                                 "refcnt %d++",
1190                                 (void *)cache_resource,
1191                                 rte_atomic32_read(&cache_resource->refcnt));
1192                         rte_atomic32_inc(&cache_resource->refcnt);
1193                         dev_flow->dv.port_id_action = cache_resource;
1194                         return 0;
1195                 }
1196         }
1197         /* Register new port id action resource. */
1198         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1199         if (!cache_resource)
1200                 return rte_flow_error_set(error, ENOMEM,
1201                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1202                                           "cannot allocate resource memory");
1203         *cache_resource = *resource;
1204         cache_resource->action =
1205                 mlx5_glue->dr_create_flow_action_dest_vport
1206                         (priv->sh->fdb_domain, resource->port_id);
1207         if (!cache_resource->action) {
1208                 rte_free(cache_resource);
1209                 return rte_flow_error_set(error, ENOMEM,
1210                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1211                                           NULL, "cannot create action");
1212         }
1213         rte_atomic32_init(&cache_resource->refcnt);
1214         rte_atomic32_inc(&cache_resource->refcnt);
1215         LIST_INSERT_HEAD(&sh->port_id_action_list, cache_resource, next);
1216         dev_flow->dv.port_id_action = cache_resource;
1217         DRV_LOG(DEBUG, "new port id action resource %p: refcnt %d++",
1218                 (void *)cache_resource,
1219                 rte_atomic32_read(&cache_resource->refcnt));
1220         return 0;
1221 }
1222
1223 /**
1224  * Get the size of specific rte_flow_item_type
1225  *
1226  * @param[in] item_type
1227  *   Tested rte_flow_item_type.
1228  *
1229  * @return
1230  *   sizeof struct item_type, 0 if void or irrelevant.
1231  */
1232 static size_t
1233 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
1234 {
1235         size_t retval;
1236
1237         switch (item_type) {
1238         case RTE_FLOW_ITEM_TYPE_ETH:
1239                 retval = sizeof(struct rte_flow_item_eth);
1240                 break;
1241         case RTE_FLOW_ITEM_TYPE_VLAN:
1242                 retval = sizeof(struct rte_flow_item_vlan);
1243                 break;
1244         case RTE_FLOW_ITEM_TYPE_IPV4:
1245                 retval = sizeof(struct rte_flow_item_ipv4);
1246                 break;
1247         case RTE_FLOW_ITEM_TYPE_IPV6:
1248                 retval = sizeof(struct rte_flow_item_ipv6);
1249                 break;
1250         case RTE_FLOW_ITEM_TYPE_UDP:
1251                 retval = sizeof(struct rte_flow_item_udp);
1252                 break;
1253         case RTE_FLOW_ITEM_TYPE_TCP:
1254                 retval = sizeof(struct rte_flow_item_tcp);
1255                 break;
1256         case RTE_FLOW_ITEM_TYPE_VXLAN:
1257                 retval = sizeof(struct rte_flow_item_vxlan);
1258                 break;
1259         case RTE_FLOW_ITEM_TYPE_GRE:
1260                 retval = sizeof(struct rte_flow_item_gre);
1261                 break;
1262         case RTE_FLOW_ITEM_TYPE_NVGRE:
1263                 retval = sizeof(struct rte_flow_item_nvgre);
1264                 break;
1265         case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1266                 retval = sizeof(struct rte_flow_item_vxlan_gpe);
1267                 break;
1268         case RTE_FLOW_ITEM_TYPE_MPLS:
1269                 retval = sizeof(struct rte_flow_item_mpls);
1270                 break;
1271         case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
1272         default:
1273                 retval = 0;
1274                 break;
1275         }
1276         return retval;
1277 }
1278
1279 #define MLX5_ENCAP_IPV4_VERSION         0x40
1280 #define MLX5_ENCAP_IPV4_IHL_MIN         0x05
1281 #define MLX5_ENCAP_IPV4_TTL_DEF         0x40
1282 #define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
1283 #define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
1284 #define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
1285 #define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
1286
1287 /**
1288  * Convert the encap action data from list of rte_flow_item to raw buffer
1289  *
1290  * @param[in] items
1291  *   Pointer to rte_flow_item objects list.
1292  * @param[out] buf
1293  *   Pointer to the output buffer.
1294  * @param[out] size
1295  *   Pointer to the output buffer size.
1296  * @param[out] error
1297  *   Pointer to the error structure.
1298  *
1299  * @return
1300  *   0 on success, a negative errno value otherwise and rte_errno is set.
1301  */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	/* Track the most recent header of each kind for back-patching. */
	struct rte_ether_hdr *eth = NULL;
	struct rte_vlan_hdr *vlan = NULL;
	struct rte_ipv4_hdr *ipv4 = NULL;
	struct rte_ipv6_hdr *ipv6 = NULL;
	struct rte_udp_hdr *udp = NULL;
	struct rte_vxlan_hdr *vxlan = NULL;
	struct rte_vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct rte_gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	/*
	 * Copy each item spec into the output buffer in order, then fill
	 * missing fields (ethertype, next protocol, defaults) from context.
	 */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		/* NOTE(review): items->spec is assumed non-NULL here. */
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct rte_ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct rte_vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			/* Patch the preceding ethertype if unset. */
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct rte_ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch the innermost unset L2 protocol field. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4);
			/* Default version/IHL and TTL when not provided. */
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct rte_ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			/* Patch the innermost unset L2 protocol field. */
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV6);
			/* Default version field and hop limit when unset. */
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct rte_udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Patch the preceding IP next-protocol if unset. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct rte_vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* Default the well-known VXLAN UDP port and flags. */
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct rte_vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			/* GPE must declare its next protocol explicitly. */
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct rte_gre_hdr *)&buf[temp_size];
			/* GRE must declare its payload protocol explicitly. */
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			/* Patch the preceding IP next-protocol if unset. */
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
1456
1457 static int
1458 flow_dv_zero_encap_udp_csum(void *data, struct rte_flow_error *error)
1459 {
1460         struct rte_ether_hdr *eth = NULL;
1461         struct rte_vlan_hdr *vlan = NULL;
1462         struct rte_ipv6_hdr *ipv6 = NULL;
1463         struct rte_udp_hdr *udp = NULL;
1464         char *next_hdr;
1465         uint16_t proto;
1466
1467         eth = (struct rte_ether_hdr *)data;
1468         next_hdr = (char *)(eth + 1);
1469         proto = RTE_BE16(eth->ether_type);
1470
1471         /* VLAN skipping */
1472         while (proto == RTE_ETHER_TYPE_VLAN || proto == RTE_ETHER_TYPE_QINQ) {
1473                 next_hdr += sizeof(struct rte_vlan_hdr);
1474                 vlan = (struct rte_vlan_hdr *)next_hdr;
1475                 proto = RTE_BE16(vlan->eth_proto);
1476         }
1477
1478         /* HW calculates IPv4 csum. no need to proceed */
1479         if (proto == RTE_ETHER_TYPE_IPV4)
1480                 return 0;
1481
1482         /* non IPv4/IPv6 header. not supported */
1483         if (proto != RTE_ETHER_TYPE_IPV6) {
1484                 return rte_flow_error_set(error, ENOTSUP,
1485                                           RTE_FLOW_ERROR_TYPE_ACTION,
1486                                           NULL, "Cannot offload non IPv4/IPv6");
1487         }
1488
1489         ipv6 = (struct rte_ipv6_hdr *)next_hdr;
1490
1491         /* ignore non UDP */
1492         if (ipv6->proto != IPPROTO_UDP)
1493                 return 0;
1494
1495         udp = (struct rte_udp_hdr *)(ipv6 + 1);
1496         udp->dgram_cksum = 0;
1497
1498         return 0;
1499 }
1500
1501 /**
1502  * Convert L2 encap action to DV specification.
1503  *
1504  * @param[in] dev
1505  *   Pointer to rte_eth_dev structure.
1506  * @param[in] action
1507  *   Pointer to action structure.
1508  * @param[in, out] dev_flow
1509  *   Pointer to the mlx5_flow.
1510  * @param[in] transfer
1511  *   Mark if the flow is E-Switch flow.
1512  * @param[out] error
1513  *   Pointer to the error structure.
1514  *
1515  * @return
1516  *   0 on success, a negative errno value otherwise and rte_errno is set.
1517  */
1518 static int
1519 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1520                                const struct rte_flow_action *action,
1521                                struct mlx5_flow *dev_flow,
1522                                uint8_t transfer,
1523                                struct rte_flow_error *error)
1524 {
1525         const struct rte_flow_item *encap_data;
1526         const struct rte_flow_action_raw_encap *raw_encap_data;
1527         struct mlx5_flow_dv_encap_decap_resource res = {
1528                 .reformat_type =
1529                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1530                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1531                                       MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1532         };
1533
1534         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1535                 raw_encap_data =
1536                         (const struct rte_flow_action_raw_encap *)action->conf;
1537                 res.size = raw_encap_data->size;
1538                 memcpy(res.buf, raw_encap_data->data, res.size);
1539                 if (flow_dv_zero_encap_udp_csum(res.buf, error))
1540                         return -rte_errno;
1541         } else {
1542                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1543                         encap_data =
1544                                 ((const struct rte_flow_action_vxlan_encap *)
1545                                                 action->conf)->definition;
1546                 else
1547                         encap_data =
1548                                 ((const struct rte_flow_action_nvgre_encap *)
1549                                                 action->conf)->definition;
1550                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1551                                                &res.size, error))
1552                         return -rte_errno;
1553         }
1554         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1555                 return rte_flow_error_set(error, EINVAL,
1556                                           RTE_FLOW_ERROR_TYPE_ACTION,
1557                                           NULL, "can't create L2 encap action");
1558         return 0;
1559 }
1560
1561 /**
1562  * Convert L2 decap action to DV specification.
1563  *
1564  * @param[in] dev
1565  *   Pointer to rte_eth_dev structure.
1566  * @param[in, out] dev_flow
1567  *   Pointer to the mlx5_flow.
1568  * @param[in] transfer
1569  *   Mark if the flow is E-Switch flow.
1570  * @param[out] error
1571  *   Pointer to the error structure.
1572  *
1573  * @return
1574  *   0 on success, a negative errno value otherwise and rte_errno is set.
1575  */
1576 static int
1577 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1578                                struct mlx5_flow *dev_flow,
1579                                uint8_t transfer,
1580                                struct rte_flow_error *error)
1581 {
1582         struct mlx5_flow_dv_encap_decap_resource res = {
1583                 .size = 0,
1584                 .reformat_type =
1585                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1586                 .ft_type = transfer ? MLX5DV_FLOW_TABLE_TYPE_FDB :
1587                                       MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1588         };
1589
1590         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1591                 return rte_flow_error_set(error, EINVAL,
1592                                           RTE_FLOW_ERROR_TYPE_ACTION,
1593                                           NULL, "can't create L2 decap action");
1594         return 0;
1595 }
1596
1597 /**
1598  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1599  *
1600  * @param[in] dev
1601  *   Pointer to rte_eth_dev structure.
1602  * @param[in] action
1603  *   Pointer to action structure.
1604  * @param[in, out] dev_flow
1605  *   Pointer to the mlx5_flow.
1606  * @param[in] attr
1607  *   Pointer to the flow attributes.
1608  * @param[out] error
1609  *   Pointer to the error structure.
1610  *
1611  * @return
1612  *   0 on success, a negative errno value otherwise and rte_errno is set.
1613  */
1614 static int
1615 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1616                                 const struct rte_flow_action *action,
1617                                 struct mlx5_flow *dev_flow,
1618                                 const struct rte_flow_attr *attr,
1619                                 struct rte_flow_error *error)
1620 {
1621         const struct rte_flow_action_raw_encap *encap_data;
1622         struct mlx5_flow_dv_encap_decap_resource res;
1623
1624         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1625         res.size = encap_data->size;
1626         memcpy(res.buf, encap_data->data, res.size);
1627         res.reformat_type = attr->egress ?
1628                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1629                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1630         if (attr->transfer)
1631                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
1632         else
1633                 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1634                                              MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1635         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1636                 return rte_flow_error_set(error, EINVAL,
1637                                           RTE_FLOW_ERROR_TYPE_ACTION,
1638                                           NULL, "can't create encap action");
1639         return 0;
1640 }
1641
1642 /**
1643  * Validate the modify-header actions.
1644  *
1645  * @param[in] action_flags
1646  *   Holds the actions detected until now.
1647  * @param[in] action
1648  *   Pointer to the modify action.
1649  * @param[out] error
1650  *   Pointer to error structure.
1651  *
1652  * @return
1653  *   0 on success, a negative errno value otherwise and rte_errno is set.
1654  */
1655 static int
1656 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1657                                    const struct rte_flow_action *action,
1658                                    struct rte_flow_error *error)
1659 {
1660         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1661                 return rte_flow_error_set(error, EINVAL,
1662                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1663                                           NULL, "action configuration not set");
1664         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1665                 return rte_flow_error_set(error, EINVAL,
1666                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1667                                           "can't have encap action before"
1668                                           " modify action");
1669         return 0;
1670 }
1671
1672 /**
1673  * Validate the modify-header MAC address actions.
1674  *
1675  * @param[in] action_flags
1676  *   Holds the actions detected until now.
1677  * @param[in] action
1678  *   Pointer to the modify action.
1679  * @param[in] item_flags
1680  *   Holds the items detected.
1681  * @param[out] error
1682  *   Pointer to error structure.
1683  *
1684  * @return
1685  *   0 on success, a negative errno value otherwise and rte_errno is set.
1686  */
1687 static int
1688 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1689                                    const struct rte_flow_action *action,
1690                                    const uint64_t item_flags,
1691                                    struct rte_flow_error *error)
1692 {
1693         int ret = 0;
1694
1695         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1696         if (!ret) {
1697                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1698                         return rte_flow_error_set(error, EINVAL,
1699                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1700                                                   NULL,
1701                                                   "no L2 item in pattern");
1702         }
1703         return ret;
1704 }
1705
1706 /**
1707  * Validate the modify-header IPv4 address actions.
1708  *
1709  * @param[in] action_flags
1710  *   Holds the actions detected until now.
1711  * @param[in] action
1712  *   Pointer to the modify action.
1713  * @param[in] item_flags
1714  *   Holds the items detected.
1715  * @param[out] error
1716  *   Pointer to error structure.
1717  *
1718  * @return
1719  *   0 on success, a negative errno value otherwise and rte_errno is set.
1720  */
1721 static int
1722 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1723                                     const struct rte_flow_action *action,
1724                                     const uint64_t item_flags,
1725                                     struct rte_flow_error *error)
1726 {
1727         int ret = 0;
1728
1729         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1730         if (!ret) {
1731                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1732                         return rte_flow_error_set(error, EINVAL,
1733                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1734                                                   NULL,
1735                                                   "no ipv4 item in pattern");
1736         }
1737         return ret;
1738 }
1739
1740 /**
1741  * Validate the modify-header IPv6 address actions.
1742  *
1743  * @param[in] action_flags
1744  *   Holds the actions detected until now.
1745  * @param[in] action
1746  *   Pointer to the modify action.
1747  * @param[in] item_flags
1748  *   Holds the items detected.
1749  * @param[out] error
1750  *   Pointer to error structure.
1751  *
1752  * @return
1753  *   0 on success, a negative errno value otherwise and rte_errno is set.
1754  */
1755 static int
1756 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1757                                     const struct rte_flow_action *action,
1758                                     const uint64_t item_flags,
1759                                     struct rte_flow_error *error)
1760 {
1761         int ret = 0;
1762
1763         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1764         if (!ret) {
1765                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1766                         return rte_flow_error_set(error, EINVAL,
1767                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1768                                                   NULL,
1769                                                   "no ipv6 item in pattern");
1770         }
1771         return ret;
1772 }
1773
1774 /**
1775  * Validate the modify-header TP actions.
1776  *
1777  * @param[in] action_flags
1778  *   Holds the actions detected until now.
1779  * @param[in] action
1780  *   Pointer to the modify action.
1781  * @param[in] item_flags
1782  *   Holds the items detected.
1783  * @param[out] error
1784  *   Pointer to error structure.
1785  *
1786  * @return
1787  *   0 on success, a negative errno value otherwise and rte_errno is set.
1788  */
1789 static int
1790 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1791                                   const struct rte_flow_action *action,
1792                                   const uint64_t item_flags,
1793                                   struct rte_flow_error *error)
1794 {
1795         int ret = 0;
1796
1797         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1798         if (!ret) {
1799                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1800                         return rte_flow_error_set(error, EINVAL,
1801                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1802                                                   NULL, "no transport layer "
1803                                                   "in pattern");
1804         }
1805         return ret;
1806 }
1807
1808 /**
1809  * Validate the modify-header actions of increment/decrement
1810  * TCP Sequence-number.
1811  *
1812  * @param[in] action_flags
1813  *   Holds the actions detected until now.
1814  * @param[in] action
1815  *   Pointer to the modify action.
1816  * @param[in] item_flags
1817  *   Holds the items detected.
1818  * @param[out] error
1819  *   Pointer to error structure.
1820  *
1821  * @return
1822  *   0 on success, a negative errno value otherwise and rte_errno is set.
1823  */
1824 static int
1825 flow_dv_validate_action_modify_tcp_seq(const uint64_t action_flags,
1826                                        const struct rte_flow_action *action,
1827                                        const uint64_t item_flags,
1828                                        struct rte_flow_error *error)
1829 {
1830         int ret = 0;
1831
1832         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1833         if (!ret) {
1834                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1835                         return rte_flow_error_set(error, EINVAL,
1836                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1837                                                   NULL, "no TCP item in"
1838                                                   " pattern");
1839                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ &&
1840                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_SEQ)) ||
1841                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ &&
1842                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_SEQ)))
1843                         return rte_flow_error_set(error, EINVAL,
1844                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1845                                                   NULL,
1846                                                   "cannot decrease and increase"
1847                                                   " TCP sequence number"
1848                                                   " at the same time");
1849         }
1850         return ret;
1851 }
1852
1853 /**
1854  * Validate the modify-header actions of increment/decrement
1855  * TCP Acknowledgment number.
1856  *
1857  * @param[in] action_flags
1858  *   Holds the actions detected until now.
1859  * @param[in] action
1860  *   Pointer to the modify action.
1861  * @param[in] item_flags
1862  *   Holds the items detected.
1863  * @param[out] error
1864  *   Pointer to error structure.
1865  *
1866  * @return
1867  *   0 on success, a negative errno value otherwise and rte_errno is set.
1868  */
1869 static int
1870 flow_dv_validate_action_modify_tcp_ack(const uint64_t action_flags,
1871                                        const struct rte_flow_action *action,
1872                                        const uint64_t item_flags,
1873                                        struct rte_flow_error *error)
1874 {
1875         int ret = 0;
1876
1877         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1878         if (!ret) {
1879                 if (!(item_flags & MLX5_FLOW_LAYER_OUTER_L4_TCP))
1880                         return rte_flow_error_set(error, EINVAL,
1881                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1882                                                   NULL, "no TCP item in"
1883                                                   " pattern");
1884                 if ((action->type == RTE_FLOW_ACTION_TYPE_INC_TCP_ACK &&
1885                         (action_flags & MLX5_FLOW_ACTION_DEC_TCP_ACK)) ||
1886                     (action->type == RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK &&
1887                         (action_flags & MLX5_FLOW_ACTION_INC_TCP_ACK)))
1888                         return rte_flow_error_set(error, EINVAL,
1889                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1890                                                   NULL,
1891                                                   "cannot decrease and increase"
1892                                                   " TCP acknowledgment number"
1893                                                   " at the same time");
1894         }
1895         return ret;
1896 }
1897
1898 /**
1899  * Validate the modify-header TTL actions.
1900  *
1901  * @param[in] action_flags
1902  *   Holds the actions detected until now.
1903  * @param[in] action
1904  *   Pointer to the modify action.
1905  * @param[in] item_flags
1906  *   Holds the items detected.
1907  * @param[out] error
1908  *   Pointer to error structure.
1909  *
1910  * @return
1911  *   0 on success, a negative errno value otherwise and rte_errno is set.
1912  */
1913 static int
1914 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1915                                    const struct rte_flow_action *action,
1916                                    const uint64_t item_flags,
1917                                    struct rte_flow_error *error)
1918 {
1919         int ret = 0;
1920
1921         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1922         if (!ret) {
1923                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1924                         return rte_flow_error_set(error, EINVAL,
1925                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1926                                                   NULL,
1927                                                   "no IP protocol in pattern");
1928         }
1929         return ret;
1930 }
1931
1932 /**
1933  * Validate jump action.
1934  *
1935  * @param[in] action
1936  *   Pointer to the modify action.
1937  * @param[in] group
1938  *   The group of the current flow.
1939  * @param[out] error
1940  *   Pointer to error structure.
1941  *
1942  * @return
1943  *   0 on success, a negative errno value otherwise and rte_errno is set.
1944  */
1945 static int
1946 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1947                              uint32_t group,
1948                              struct rte_flow_error *error)
1949 {
1950         if (action->type != RTE_FLOW_ACTION_TYPE_JUMP && !action->conf)
1951                 return rte_flow_error_set(error, EINVAL,
1952                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1953                                           NULL, "action configuration not set");
1954         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1955                 return rte_flow_error_set(error, EINVAL,
1956                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1957                                           "target group must be higher then"
1958                                           " the current flow group");
1959         return 0;
1960 }
1961
1962 /*
1963  * Validate the port_id action.
1964  *
1965  * @param[in] dev
1966  *   Pointer to rte_eth_dev structure.
1967  * @param[in] action_flags
1968  *   Bit-fields that holds the actions detected until now.
1969  * @param[in] action
1970  *   Port_id RTE action structure.
1971  * @param[in] attr
1972  *   Attributes of flow that includes this action.
1973  * @param[out] error
1974  *   Pointer to error structure.
1975  *
1976  * @return
1977  *   0 on success, a negative errno value otherwise and rte_errno is set.
1978  */
1979 static int
1980 flow_dv_validate_action_port_id(struct rte_eth_dev *dev,
1981                                 uint64_t action_flags,
1982                                 const struct rte_flow_action *action,
1983                                 const struct rte_flow_attr *attr,
1984                                 struct rte_flow_error *error)
1985 {
1986         const struct rte_flow_action_port_id *port_id;
1987         uint16_t port;
1988         uint16_t esw_domain_id;
1989         uint16_t act_port_domain_id;
1990         int ret;
1991
1992         if (!attr->transfer)
1993                 return rte_flow_error_set(error, ENOTSUP,
1994                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1995                                           NULL,
1996                                           "port id action is valid in transfer"
1997                                           " mode only");
1998         if (!action || !action->conf)
1999                 return rte_flow_error_set(error, ENOTSUP,
2000                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
2001                                           NULL,
2002                                           "port id action parameters must be"
2003                                           " specified");
2004         if (action_flags & (MLX5_FLOW_FATE_ACTIONS |
2005                             MLX5_FLOW_FATE_ESWITCH_ACTIONS))
2006                 return rte_flow_error_set(error, EINVAL,
2007                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2008                                           "can have only one fate actions in"
2009                                           " a flow");
2010         ret = mlx5_port_to_eswitch_info(dev->data->port_id,
2011                                         &esw_domain_id, NULL);
2012         if (ret < 0)
2013                 return rte_flow_error_set(error, -ret,
2014                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2015                                           NULL,
2016                                           "failed to obtain E-Switch info");
2017         port_id = action->conf;
2018         port = port_id->original ? dev->data->port_id : port_id->id;
2019         ret = mlx5_port_to_eswitch_info(port, &act_port_domain_id, NULL);
2020         if (ret)
2021                 return rte_flow_error_set
2022                                 (error, -ret,
2023                                  RTE_FLOW_ERROR_TYPE_ACTION_CONF, port_id,
2024                                  "failed to obtain E-Switch port id for port");
2025         if (act_port_domain_id != esw_domain_id)
2026                 return rte_flow_error_set
2027                                 (error, -ret,
2028                                  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
2029                                  "port does not belong to"
2030                                  " E-Switch being configured");
2031         return 0;
2032 }
2033
/**
 * Find existing modify-header resource or create and register a new one.
 *
 * The resource is a reference-counted cache entry shared by all flows that
 * request an identical modify-header action list on the same table type.
 *
 * @param dev[in, out]
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to modify-header resource.
 * @parm[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_modify_hdr_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_modify_hdr_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
	struct mlx5dv_dr_domain *ns;

	/* Select the DR domain matching the table type the rule targets. */
	if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_FDB)
		ns = sh->fdb_domain;
	else if (resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX)
		ns = sh->tx_domain;
	else
		ns = sh->rx_domain;
	/* Group 0 flows go to the root table and need the root-level flag. */
	resource->flags =
		dev_flow->flow->group ? 0 : MLX5DV_DR_ACTION_FLAGS_ROOT_LEVEL;
	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
		/*
		 * Entries match when table type, flags and the full action
		 * command array (actions_num entries) are identical.
		 */
		if (resource->ft_type == cache_resource->ft_type &&
		    resource->actions_num == cache_resource->actions_num &&
		    resource->flags == cache_resource->flags &&
		    !memcmp((const void *)resource->actions,
			    (const void *)cache_resource->actions,
			    (resource->actions_num *
					    sizeof(resource->actions[0])))) {
			DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			/* Reuse the cached action; just bump the refcount. */
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.modify_hdr = cache_resource;
			return 0;
		}
	}
	/* Register new modify-header resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_modify_header
					(sh->ctx, cache_resource->ft_type,
					 ns, cache_resource->flags,
					 cache_resource->actions_num *
					 sizeof(cache_resource->actions[0]),
					 (uint64_t *)cache_resource->actions);
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	/* New entry starts with refcnt 1 (init to 0, then increment). */
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
	dev_flow->dv.modify_hdr = cache_resource;
	DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
2115
2116 /**
2117  * Get or create a flow counter.
2118  *
2119  * @param[in] dev
2120  *   Pointer to the Ethernet device structure.
2121  * @param[in] shared
2122  *   Indicate if this counter is shared with other flows.
2123  * @param[in] id
2124  *   Counter identifier.
2125  *
2126  * @return
2127  *   pointer to flow counter on success, NULL otherwise and rte_errno is set.
2128  */
2129 static struct mlx5_flow_counter *
2130 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
2131 {
2132         struct mlx5_priv *priv = dev->data->dev_private;
2133         struct mlx5_flow_counter *cnt = NULL;
2134         struct mlx5_devx_counter_set *dcs = NULL;
2135         int ret;
2136
2137         if (!priv->config.devx) {
2138                 ret = -ENOTSUP;
2139                 goto error_exit;
2140         }
2141         if (shared) {
2142                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
2143                         if (cnt->shared && cnt->id == id) {
2144                                 cnt->ref_cnt++;
2145                                 return cnt;
2146                         }
2147                 }
2148         }
2149         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
2150         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
2151         if (!dcs || !cnt) {
2152                 ret = -ENOMEM;
2153                 goto error_exit;
2154         }
2155         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
2156         if (ret)
2157                 goto error_exit;
2158         struct mlx5_flow_counter tmpl = {
2159                 .shared = shared,
2160                 .ref_cnt = 1,
2161                 .id = id,
2162                 .dcs = dcs,
2163         };
2164         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
2165         if (!tmpl.action) {
2166                 ret = errno;
2167                 goto error_exit;
2168         }
2169         *cnt = tmpl;
2170         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
2171         return cnt;
2172 error_exit:
2173         rte_free(cnt);
2174         rte_free(dcs);
2175         rte_errno = -ret;
2176         return NULL;
2177 }
2178
2179 /**
2180  * Release a flow counter.
2181  *
2182  * @param[in] counter
2183  *   Pointer to the counter handler.
2184  */
2185 static void
2186 flow_dv_counter_release(struct mlx5_flow_counter *counter)
2187 {
2188         int ret;
2189
2190         if (!counter)
2191                 return;
2192         if (--counter->ref_cnt == 0) {
2193                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
2194                 if (ret)
2195                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
2196                 LIST_REMOVE(counter, next);
2197                 rte_free(counter->dcs);
2198                 rte_free(counter);
2199         }
2200 }
2201
2202 /**
2203  * Verify the @p attributes will be correctly understood by the NIC and store
2204  * them in the @p flow if everything is correct.
2205  *
2206  * @param[in] dev
2207  *   Pointer to dev struct.
2208  * @param[in] attributes
2209  *   Pointer to flow attributes
2210  * @param[out] error
2211  *   Pointer to error structure.
2212  *
2213  * @return
2214  *   0 on success, a negative errno value otherwise and rte_errno is set.
2215  */
2216 static int
2217 flow_dv_validate_attributes(struct rte_eth_dev *dev,
2218                             const struct rte_flow_attr *attributes,
2219                             struct rte_flow_error *error)
2220 {
2221         struct mlx5_priv *priv = dev->data->dev_private;
2222         uint32_t priority_max = priv->config.flow_prio - 1;
2223
2224 #ifndef HAVE_MLX5DV_DR
2225         if (attributes->group)
2226                 return rte_flow_error_set(error, ENOTSUP,
2227                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
2228                                           NULL,
2229                                           "groups is not supported");
2230 #endif
2231         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
2232             attributes->priority >= priority_max)
2233                 return rte_flow_error_set(error, ENOTSUP,
2234                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
2235                                           NULL,
2236                                           "priority out of range");
2237         if (attributes->transfer) {
2238                 if (!priv->config.dv_esw_en)
2239                         return rte_flow_error_set
2240                                 (error, ENOTSUP,
2241                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2242                                  "E-Switch dr is not supported");
2243                 if (!(priv->representor || priv->master))
2244                         return rte_flow_error_set
2245                                 (error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2246                                  NULL, "E-Switch configurationd can only be"
2247                                  " done by a master or a representor device");
2248                 if (attributes->egress)
2249                         return rte_flow_error_set
2250                                 (error, ENOTSUP,
2251                                  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS, attributes,
2252                                  "egress is not supported");
2253                 if (attributes->group >= MLX5_MAX_TABLES_FDB)
2254                         return rte_flow_error_set
2255                                 (error, EINVAL,
2256                                  RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
2257                                  NULL, "group must be smaller than "
2258                                  RTE_STR(MLX5_MAX_FDB_TABLES));
2259         }
2260         if (!(attributes->egress ^ attributes->ingress))
2261                 return rte_flow_error_set(error, ENOTSUP,
2262                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
2263                                           "must specify exactly one of "
2264                                           "ingress or egress");
2265         return 0;
2266 }
2267
/**
 * Internal validation function. For validating both actions and items.
 *
 * Walks the item list first, accumulating layer bits into item_flags and
 * tracking the L3 next-protocol so L4/tunnel items can be cross-checked,
 * then walks the action list, accumulating action bits and counting
 * actions against the device limit. Finally applies E-Switch (transfer)
 * restrictions and checks that a fate action is present.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
		 const struct rte_flow_item items[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	uint64_t action_flags = 0;
	uint64_t item_flags = 0;
	uint64_t last_item = 0;
	/* 0xff means "no protocol constraint yet" for L4/tunnel checks. */
	uint8_t next_protocol = 0xff;
	int actions_n = 0;
	/* Remembers the GRE item so GRE_KEY can be validated against it. */
	const struct rte_flow_item *gre_item = NULL;
	/* NIC-specific TCP mask: flags and ports are matchable. */
	struct rte_flow_item_tcp nic_tcp_mask = {
		.hdr = {
			.tcp_flags = 0xFF,
			.src_port = RTE_BE16(UINT16_MAX),
			.dst_port = RTE_BE16(UINT16_MAX),
		}
	};

	if (items == NULL)
		return -1;
	ret = flow_dv_validate_attributes(dev, attr, error);
	if (ret < 0)
		return ret;
	/* Pattern validation: one pass over the item list. */
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		/* Non-zero once any tunnel layer has been seen. */
		int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		case RTE_FLOW_ITEM_TYPE_PORT_ID:
			ret = flow_dv_validate_item_port_id
					(dev, items, attr, item_flags, error);
			if (ret < 0)
				return ret;
			last_item |= MLX5_FLOW_ITEM_PORT_ID;
			break;
		case RTE_FLOW_ITEM_TYPE_ETH:
			ret = mlx5_flow_validate_item_eth(items, item_flags,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
					     MLX5_FLOW_LAYER_OUTER_L2;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			ret = mlx5_flow_validate_item_vlan(items, item_flags,
							   error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
					     MLX5_FLOW_LAYER_OUTER_VLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ret = mlx5_flow_validate_item_ipv4(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV4;
			/*
			 * Capture the masked next-protocol for the following
			 * L4/tunnel item; reset to "any" when unmasked.
			 */
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv4 *)
			     items->mask)->hdr.next_proto_id) {
				next_protocol =
					((const struct rte_flow_item_ipv4 *)
					 (items->spec))->hdr.next_proto_id;
				next_protocol &=
					((const struct rte_flow_item_ipv4 *)
					 (items->mask))->hdr.next_proto_id;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ret = mlx5_flow_validate_item_ipv6(items, item_flags,
							   NULL, error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
					     MLX5_FLOW_LAYER_OUTER_L3_IPV6;
			if (items->mask != NULL &&
			    ((const struct rte_flow_item_ipv6 *)
			     items->mask)->hdr.proto) {
				next_protocol =
					((const struct rte_flow_item_ipv6 *)
					 items->spec)->hdr.proto;
				next_protocol &=
					((const struct rte_flow_item_ipv6 *)
					 items->mask)->hdr.proto;
			} else {
				/* Reset for inner layer. */
				next_protocol = 0xff;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			ret = mlx5_flow_validate_item_tcp
						(items, item_flags,
						 next_protocol,
						 &nic_tcp_mask,
						 error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
					     MLX5_FLOW_LAYER_OUTER_L4_TCP;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			ret = mlx5_flow_validate_item_udp(items, item_flags,
							  next_protocol,
							  error);
			if (ret < 0)
				return ret;
			last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
					     MLX5_FLOW_LAYER_OUTER_L4_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			ret = mlx5_flow_validate_item_gre(items, item_flags,
							  next_protocol, error);
			if (ret < 0)
				return ret;
			/* Kept for a possible following GRE_KEY item. */
			gre_item = items;
			last_item = MLX5_FLOW_LAYER_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
			/* gre_item may still be NULL; the helper checks it. */
			ret = mlx5_flow_validate_item_gre_key
				(items, item_flags, gre_item, error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			ret = mlx5_flow_validate_item_vxlan(items, item_flags,
							    error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			ret = mlx5_flow_validate_item_vxlan_gpe(items,
								item_flags, dev,
								error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
			break;
		case RTE_FLOW_ITEM_TYPE_MPLS:
			ret = mlx5_flow_validate_item_mpls(dev, items,
							   item_flags,
							   last_item, error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_LAYER_MPLS;
			break;
		case RTE_FLOW_ITEM_TYPE_META:
			ret = flow_dv_validate_item_meta(dev, items, attr,
							 error);
			if (ret < 0)
				return ret;
			last_item = MLX5_FLOW_ITEM_METADATA;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP:
			ret = mlx5_flow_validate_item_icmp(items, item_flags,
							   next_protocol,
							   error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_ICMP;
			break;
		case RTE_FLOW_ITEM_TYPE_ICMP6:
			ret = mlx5_flow_validate_item_icmp6(items, item_flags,
							    next_protocol,
							    error);
			if (ret < 0)
				return ret;
			item_flags |= MLX5_FLOW_LAYER_ICMP6;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ITEM,
						  NULL, "item not supported");
		}
		/* Commit the layer bit chosen by this iteration. */
		item_flags |= last_item;
	}
	/* Action validation: one pass over the action list. */
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions, "too many actions");
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_PORT_ID:
			ret = flow_dv_validate_action_port_id(dev,
							      action_flags,
							      actions,
							      attr,
							      error);
			if (ret)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_PORT_ID;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_FLAG:
			ret = mlx5_flow_validate_action_flag(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_FLAG;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_MARK:
			ret = mlx5_flow_validate_action_mark(actions,
							     action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_MARK;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_DROP:
			ret = mlx5_flow_validate_action_drop(action_flags,
							     attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_DROP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_QUEUE:
			ret = mlx5_flow_validate_action_queue(actions,
							      action_flags, dev,
							      attr, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_QUEUE;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RSS:
			ret = mlx5_flow_validate_action_rss(actions,
							    action_flags, dev,
							    attr, item_flags,
							    error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RSS;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = flow_dv_validate_action_count(dev, error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_COUNT;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
			ret = flow_dv_validate_action_l2_encap(action_flags,
							       actions, attr,
							       error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
					MLX5_FLOW_ACTION_VXLAN_ENCAP :
					MLX5_FLOW_ACTION_NVGRE_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
		case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
			ret = flow_dv_validate_action_l2_decap(action_flags,
							       attr, error);
			if (ret < 0)
				return ret;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
					MLX5_FLOW_ACTION_VXLAN_DECAP :
					MLX5_FLOW_ACTION_NVGRE_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
			ret = flow_dv_validate_action_raw_encap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
			ret = flow_dv_validate_action_raw_decap(action_flags,
								actions, attr,
								error);
			if (ret < 0)
				return ret;
			action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
			++actions_n;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
			ret = flow_dv_validate_action_modify_mac(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
						MLX5_FLOW_ACTION_SET_MAC_SRC :
						MLX5_FLOW_ACTION_SET_MAC_DST;
			break;

		case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
			ret = flow_dv_validate_action_modify_ipv4(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
						MLX5_FLOW_ACTION_SET_IPV4_SRC :
						MLX5_FLOW_ACTION_SET_IPV4_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
			ret = flow_dv_validate_action_modify_ipv6(action_flags,
								  actions,
								  item_flags,
								  error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
						MLX5_FLOW_ACTION_SET_IPV6_SRC :
						MLX5_FLOW_ACTION_SET_IPV6_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
		case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
			ret = flow_dv_validate_action_modify_tp(action_flags,
								actions,
								item_flags,
								error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
						MLX5_FLOW_ACTION_SET_TP_SRC :
						MLX5_FLOW_ACTION_SET_TP_DST;
			break;
		case RTE_FLOW_ACTION_TYPE_DEC_TTL:
		case RTE_FLOW_ACTION_TYPE_SET_TTL:
			ret = flow_dv_validate_action_modify_ttl(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_SET_TTL ?
						MLX5_FLOW_ACTION_SET_TTL :
						MLX5_FLOW_ACTION_DEC_TTL;
			break;
		case RTE_FLOW_ACTION_TYPE_JUMP:
			ret = flow_dv_validate_action_jump(actions,
							   attr->group, error);
			if (ret)
				return ret;
			++actions_n;
			action_flags |= MLX5_FLOW_ACTION_JUMP;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
			ret = flow_dv_validate_action_modify_tcp_seq
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
						MLX5_FLOW_ACTION_INC_TCP_SEQ :
						MLX5_FLOW_ACTION_DEC_TCP_SEQ;
			break;
		case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
		case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
			ret = flow_dv_validate_action_modify_tcp_ack
								(action_flags,
								 actions,
								 item_flags,
								 error);
			if (ret < 0)
				return ret;
			/* Count all modify-header actions as one action. */
			if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
				++actions_n;
			action_flags |= actions->type ==
					RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
						MLX5_FLOW_ACTION_INC_TCP_ACK :
						MLX5_FLOW_ACTION_DEC_TCP_ACK;
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
	}
	/* Eswitch has few restrictions on using items and actions */
	if (attr->transfer) {
		if (action_flags & MLX5_FLOW_ACTION_FLAG)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action FLAG");
		if (action_flags & MLX5_FLOW_ACTION_MARK)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action MARK");
		if (action_flags & MLX5_FLOW_ACTION_QUEUE)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action QUEUE");
		if (action_flags & MLX5_FLOW_ACTION_RSS)
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  NULL,
						  "unsupported action RSS");
		if (!(action_flags & MLX5_FLOW_FATE_ESWITCH_ACTIONS))
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	} else {
		/* Ingress NIC flows must end with a fate action. */
		if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "no fate action is found");
	}
	return 0;
}
2749
2750 /**
2751  * Internal preparation function. Allocates the DV flow size,
2752  * this size is constant.
2753  *
2754  * @param[in] attr
2755  *   Pointer to the flow attributes.
2756  * @param[in] items
2757  *   Pointer to the list of items.
2758  * @param[in] actions
2759  *   Pointer to the list of actions.
2760  * @param[out] error
2761  *   Pointer to the error structure.
2762  *
2763  * @return
2764  *   Pointer to mlx5_flow object on success,
2765  *   otherwise NULL and rte_errno is set.
2766  */
2767 static struct mlx5_flow *
2768 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2769                 const struct rte_flow_item items[] __rte_unused,
2770                 const struct rte_flow_action actions[] __rte_unused,
2771                 struct rte_flow_error *error)
2772 {
2773         uint32_t size = sizeof(struct mlx5_flow);
2774         struct mlx5_flow *flow;
2775
2776         flow = rte_calloc(__func__, 1, size, 0);
2777         if (!flow) {
2778                 rte_flow_error_set(error, ENOMEM,
2779                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2780                                    "not enough memory to create flow");
2781                 return NULL;
2782         }
2783         flow->dv.value.size = MLX5_ST_SZ_BYTES(fte_match_param);
2784         return flow;
2785 }
2786
2787 #ifndef NDEBUG
2788 /**
2789  * Sanity check for match mask and value. Similar to check_valid_spec() in
2790  * kernel driver. If unmasked bit is present in value, it returns failure.
2791  *
2792  * @param match_mask
2793  *   pointer to match mask buffer.
2794  * @param match_value
2795  *   pointer to match value buffer.
2796  *
2797  * @return
2798  *   0 if valid, -EINVAL otherwise.
2799  */
2800 static int
2801 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2802 {
2803         uint8_t *m = match_mask;
2804         uint8_t *v = match_value;
2805         unsigned int i;
2806
2807         for (i = 0; i < MLX5_ST_SZ_BYTES(fte_match_param); ++i) {
2808                 if (v[i] & ~m[i]) {
2809                         DRV_LOG(ERR,
2810                                 "match_value differs from match_criteria"
2811                                 " %p[%u] != %p[%u]",
2812                                 match_value, i, match_mask, i);
2813                         return -EINVAL;
2814                 }
2815         }
2816         return 0;
2817 }
2818 #endif
2819
2820 /**
2821  * Add Ethernet item to matcher and to the value.
2822  *
2823  * @param[in, out] matcher
2824  *   Flow matcher.
2825  * @param[in, out] key
2826  *   Flow matcher value.
2827  * @param[in] item
2828  *   Flow pattern to translate.
2829  * @param[in] inner
2830  *   Item is inner pattern.
2831  */
2832 static void
2833 flow_dv_translate_item_eth(void *matcher, void *key,
2834                            const struct rte_flow_item *item, int inner)
2835 {
2836         const struct rte_flow_item_eth *eth_m = item->mask;
2837         const struct rte_flow_item_eth *eth_v = item->spec;
2838         const struct rte_flow_item_eth nic_mask = {
2839                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2840                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2841                 .type = RTE_BE16(0xffff),
2842         };
2843         void *headers_m;
2844         void *headers_v;
2845         char *l24_v;
2846         unsigned int i;
2847
2848         if (!eth_v)
2849                 return;
2850         if (!eth_m)
2851                 eth_m = &nic_mask;
2852         if (inner) {
2853                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2854                                          inner_headers);
2855                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2856         } else {
2857                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2858                                          outer_headers);
2859                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2860         }
2861         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2862                &eth_m->dst, sizeof(eth_m->dst));
2863         /* The value must be in the range of the mask. */
2864         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2865         for (i = 0; i < sizeof(eth_m->dst); ++i)
2866                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2867         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2868                &eth_m->src, sizeof(eth_m->src));
2869         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2870         /* The value must be in the range of the mask. */
2871         for (i = 0; i < sizeof(eth_m->dst); ++i)
2872                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2873         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2874                  rte_be_to_cpu_16(eth_m->type));
2875         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2876         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2877 }
2878
2879 /**
2880  * Add VLAN item to matcher and to the value.
2881  *
2882  * @param[in, out] matcher
2883  *   Flow matcher.
2884  * @param[in, out] key
2885  *   Flow matcher value.
2886  * @param[in] item
2887  *   Flow pattern to translate.
2888  * @param[in] inner
2889  *   Item is inner pattern.
2890  */
2891 static void
2892 flow_dv_translate_item_vlan(void *matcher, void *key,
2893                             const struct rte_flow_item *item,
2894                             int inner)
2895 {
2896         const struct rte_flow_item_vlan *vlan_m = item->mask;
2897         const struct rte_flow_item_vlan *vlan_v = item->spec;
2898         const struct rte_flow_item_vlan nic_mask = {
2899                 .tci = RTE_BE16(0x0fff),
2900                 .inner_type = RTE_BE16(0xffff),
2901         };
2902         void *headers_m;
2903         void *headers_v;
2904         uint16_t tci_m;
2905         uint16_t tci_v;
2906
2907         if (!vlan_v)
2908                 return;
2909         if (!vlan_m)
2910                 vlan_m = &nic_mask;
2911         if (inner) {
2912                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2913                                          inner_headers);
2914                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2915         } else {
2916                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2917                                          outer_headers);
2918                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2919         }
2920         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2921         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2922         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2923         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2924         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2925         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2926         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2927         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2928         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2929         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2930 }
2931
2932 /**
2933  * Add IPV4 item to matcher and to the value.
2934  *
2935  * @param[in, out] matcher
2936  *   Flow matcher.
2937  * @param[in, out] key
2938  *   Flow matcher value.
2939  * @param[in] item
2940  *   Flow pattern to translate.
2941  * @param[in] inner
2942  *   Item is inner pattern.
2943  * @param[in] group
2944  *   The group to insert the rule.
2945  */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
	const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
	/* Default mask applied when the item provides none. */
	const struct rte_flow_item_ipv4 nic_mask = {
		.hdr = {
			.src_addr = RTE_BE32(0xffffffff),
			.dst_addr = RTE_BE32(0xffffffff),
			.type_of_service = 0xff,
			.next_proto_id = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	char *l24_m;
	char *l24_v;
	uint8_t tos;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 uses the full 4-bit ip_version mask; other groups mask
	 * only bit 0x4 -- NOTE(review): presumably a root-table matching
	 * constraint, confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
	/* IP version is always matched; the rest requires a spec. */
	if (!ipv4_v)
		return;
	if (!ipv4_m)
		ipv4_m = &nic_mask;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
	/* The value must be in the range of the mask. */
	*(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			  src_ipv4_src_ipv6.ipv4_layout.ipv4);
	*(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
	*(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
	/* TOS byte: low 2 bits carry ECN, high 6 bits carry DSCP. */
	tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
		 ipv4_m->hdr.type_of_service);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
		 ipv4_m->hdr.type_of_service >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv4_m->hdr.next_proto_id);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}
3009
3010 /**
3011  * Add IPV6 item to matcher and to the value.
3012  *
3013  * @param[in, out] matcher
3014  *   Flow matcher.
3015  * @param[in, out] key
3016  *   Flow matcher value.
3017  * @param[in] item
3018  *   Flow pattern to translate.
3019  * @param[in] inner
3020  *   Item is inner pattern.
3021  * @param[in] group
3022  *   The group to insert the rule.
3023  */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
			    const struct rte_flow_item *item,
			    int inner, uint32_t group)
{
	const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
	const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
	/* Default mask applied when the item provides none. */
	const struct rte_flow_item_ipv6 nic_mask = {
		.hdr = {
			.src_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.dst_addr =
				"\xff\xff\xff\xff\xff\xff\xff\xff"
				"\xff\xff\xff\xff\xff\xff\xff\xff",
			.vtc_flow = RTE_BE32(0xffffffff),
			.proto = 0xff,
			.hop_limits = 0xff,
		},
	};
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *l24_m;
	char *l24_v;
	uint32_t vtc_m;
	uint32_t vtc_v;
	int i;
	int size;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/*
	 * Group 0 uses the full 4-bit ip_version mask; other groups mask
	 * only bit 0x6 -- NOTE(review): presumably a root-table matching
	 * constraint, confirm against the PRM.
	 */
	if (group == 0)
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
	else
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
	/* IP version is always matched; the rest requires a spec. */
	if (!ipv6_v)
		return;
	if (!ipv6_m)
		ipv6_m = &nic_mask;
	size = sizeof(ipv6_m->hdr.dst_addr);
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
	/* The value must be in the range of the mask. */
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
	l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
			     src_ipv4_src_ipv6.ipv6_layout.ipv6);
	memcpy(l24_m, ipv6_m->hdr.src_addr, size);
	for (i = 0; i < size; ++i)
		l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
	/* TOS. vtc_flow = version(4b) | traffic class(8b) | flow label(20b). */
	vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
	vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
	/* ECN: low 2 bits of the traffic class (bits 20-21 of vtc_flow). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
	/* DSCP: high 6 bits of the traffic class (bits 22-27 of vtc_flow). */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
	/* Label. Low 20 bits; MLX5_SET truncates to the field width. */
	if (inner) {
		MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
			 vtc_v);
	} else {
		MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
			 vtc_m);
		MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
			 vtc_v);
	}
	/* Protocol. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
		 ipv6_m->hdr.proto);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
		 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
3113
3114 /**
3115  * Add TCP item to matcher and to the value.
3116  *
3117  * @param[in, out] matcher
3118  *   Flow matcher.
3119  * @param[in, out] key
3120  *   Flow matcher value.
3121  * @param[in] item
3122  *   Flow pattern to translate.
3123  * @param[in] inner
3124  *   Item is inner pattern.
3125  */
3126 static void
3127 flow_dv_translate_item_tcp(void *matcher, void *key,
3128                            const struct rte_flow_item *item,
3129                            int inner)
3130 {
3131         const struct rte_flow_item_tcp *tcp_m = item->mask;
3132         const struct rte_flow_item_tcp *tcp_v = item->spec;
3133         void *headers_m;
3134         void *headers_v;
3135
3136         if (inner) {
3137                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3138                                          inner_headers);
3139                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3140         } else {
3141                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3142                                          outer_headers);
3143                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3144         }
3145         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3146         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
3147         if (!tcp_v)
3148                 return;
3149         if (!tcp_m)
3150                 tcp_m = &rte_flow_item_tcp_mask;
3151         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
3152                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
3153         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
3154                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
3155         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
3156                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
3157         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
3158                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
3159         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_flags,
3160                  tcp_m->hdr.tcp_flags);
3161         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_flags,
3162                  (tcp_v->hdr.tcp_flags & tcp_m->hdr.tcp_flags));
3163 }
3164
3165 /**
3166  * Add UDP item to matcher and to the value.
3167  *
3168  * @param[in, out] matcher
3169  *   Flow matcher.
3170  * @param[in, out] key
3171  *   Flow matcher value.
3172  * @param[in] item
3173  *   Flow pattern to translate.
3174  * @param[in] inner
3175  *   Item is inner pattern.
3176  */
3177 static void
3178 flow_dv_translate_item_udp(void *matcher, void *key,
3179                            const struct rte_flow_item *item,
3180                            int inner)
3181 {
3182         const struct rte_flow_item_udp *udp_m = item->mask;
3183         const struct rte_flow_item_udp *udp_v = item->spec;
3184         void *headers_m;
3185         void *headers_v;
3186
3187         if (inner) {
3188                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3189                                          inner_headers);
3190                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3191         } else {
3192                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3193                                          outer_headers);
3194                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3195         }
3196         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3197         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
3198         if (!udp_v)
3199                 return;
3200         if (!udp_m)
3201                 udp_m = &rte_flow_item_udp_mask;
3202         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
3203                  rte_be_to_cpu_16(udp_m->hdr.src_port));
3204         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
3205                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
3206         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
3207                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
3208         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3209                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
3210 }
3211
3212 /**
3213  * Add GRE optional Key item to matcher and to the value.
3214  *
3215  * @param[in, out] matcher
3216  *   Flow matcher.
3217  * @param[in, out] key
3218  *   Flow matcher value.
3219  * @param[in] item
3220  *   Flow pattern to translate.
3221  * @param[in] inner
3222  *   Item is inner pattern.
3223  */
3224 static void
3225 flow_dv_translate_item_gre_key(void *matcher, void *key,
3226                                    const struct rte_flow_item *item)
3227 {
3228         const rte_be32_t *key_m = item->mask;
3229         const rte_be32_t *key_v = item->spec;
3230         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3231         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3232         rte_be32_t gre_key_default_mask = RTE_BE32(UINT32_MAX);
3233
3234         if (!key_v)
3235                 return;
3236         if (!key_m)
3237                 key_m = &gre_key_default_mask;
3238         /* GRE K bit must be on and should already be validated */
3239         MLX5_SET(fte_match_set_misc, misc_m, gre_k_present, 1);
3240         MLX5_SET(fte_match_set_misc, misc_v, gre_k_present, 1);
3241         MLX5_SET(fte_match_set_misc, misc_m, gre_key_h,
3242                  rte_be_to_cpu_32(*key_m) >> 8);
3243         MLX5_SET(fte_match_set_misc, misc_v, gre_key_h,
3244                  rte_be_to_cpu_32((*key_v) & (*key_m)) >> 8);
3245         MLX5_SET(fte_match_set_misc, misc_m, gre_key_l,
3246                  rte_be_to_cpu_32(*key_m) & 0xFF);
3247         MLX5_SET(fte_match_set_misc, misc_v, gre_key_l,
3248                  rte_be_to_cpu_32((*key_v) & (*key_m)) & 0xFF);
3249 }
3250
3251 /**
3252  * Add GRE item to matcher and to the value.
3253  *
3254  * @param[in, out] matcher
3255  *   Flow matcher.
3256  * @param[in, out] key
3257  *   Flow matcher value.
3258  * @param[in] item
3259  *   Flow pattern to translate.
3260  * @param[in] inner
3261  *   Item is inner pattern.
3262  */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
			   const struct rte_flow_item *item,
			   int inner)
{
	const struct rte_flow_item_gre *gre_m = item->mask;
	const struct rte_flow_item_gre *gre_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	/*
	 * Host-order overlay of the GRE c_rsvd0_ver word used to extract
	 * the C/K/S "present" bits after rte_be_to_cpu_16().
	 * NOTE(review): bitfield bit order is compiler-defined; this
	 * assumes the prevailing little-endian allocation — verify on
	 * big-endian builds.
	 */
	struct {
		union {
			__extension__
			struct {
				uint16_t version:3;
				uint16_t rsvd0:9;
				uint16_t s_present:1;
				uint16_t k_present:1;
				uint16_t rsvd_bit1:1;
				uint16_t c_present:1;
			};
			uint16_t value;
		};
	} gre_crks_rsvd0_ver_m, gre_crks_rsvd0_ver_v;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* Pin the IP protocol to GRE even without an item spec. */
	MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
	MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
	if (!gre_v)
		return;
	if (!gre_m)
		gre_m = &rte_flow_item_gre_mask;
	MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
		 rte_be_to_cpu_16(gre_m->protocol));
	MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
		 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
	/* Match the checksum/key/sequence present bits individually. */
	gre_crks_rsvd0_ver_m.value = rte_be_to_cpu_16(gre_m->c_rsvd0_ver);
	gre_crks_rsvd0_ver_v.value = rte_be_to_cpu_16(gre_v->c_rsvd0_ver);
	MLX5_SET(fte_match_set_misc, misc_m, gre_c_present,
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_c_present,
		 gre_crks_rsvd0_ver_v.c_present &
		 gre_crks_rsvd0_ver_m.c_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_k_present,
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_k_present,
		 gre_crks_rsvd0_ver_v.k_present &
		 gre_crks_rsvd0_ver_m.k_present);
	MLX5_SET(fte_match_set_misc, misc_m, gre_s_present,
		 gre_crks_rsvd0_ver_m.s_present);
	MLX5_SET(fte_match_set_misc, misc_v, gre_s_present,
		 gre_crks_rsvd0_ver_v.s_present &
		 gre_crks_rsvd0_ver_m.s_present);
}
3326
3327 /**
3328  * Add NVGRE item to matcher and to the value.
3329  *
3330  * @param[in, out] matcher
3331  *   Flow matcher.
3332  * @param[in, out] key
3333  *   Flow matcher value.
3334  * @param[in] item
3335  *   Flow pattern to translate.
3336  * @param[in] inner
3337  *   Item is inner pattern.
3338  */
3339 static void
3340 flow_dv_translate_item_nvgre(void *matcher, void *key,
3341                              const struct rte_flow_item *item,
3342                              int inner)
3343 {
3344         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
3345         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
3346         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3347         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3348         const char *tni_flow_id_m = (const char *)nvgre_m->tni;
3349         const char *tni_flow_id_v = (const char *)nvgre_v->tni;
3350         char *gre_key_m;
3351         char *gre_key_v;
3352         int size;
3353         int i;
3354
3355         flow_dv_translate_item_gre(matcher, key, item, inner);
3356         if (!nvgre_v)
3357                 return;
3358         if (!nvgre_m)
3359                 nvgre_m = &rte_flow_item_nvgre_mask;
3360         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
3361         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
3362         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
3363         memcpy(gre_key_m, tni_flow_id_m, size);
3364         for (i = 0; i < size; ++i)
3365                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
3366 }
3367
3368 /**
3369  * Add VXLAN item to matcher and to the value.
3370  *
3371  * @param[in, out] matcher
3372  *   Flow matcher.
3373  * @param[in, out] key
3374  *   Flow matcher value.
3375  * @param[in] item
3376  *   Flow pattern to translate.
3377  * @param[in] inner
3378  *   Item is inner pattern.
3379  */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
			     const struct rte_flow_item *item,
			     int inner)
{
	const struct rte_flow_item_vxlan *vxlan_m = item->mask;
	const struct rte_flow_item_vxlan *vxlan_v = item->spec;
	void *headers_m;
	void *headers_v;
	void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
	void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
	char *vni_m;
	char *vni_v;
	uint16_t dport;
	int size;
	int i;

	if (inner) {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 inner_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
	} else {
		headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
					 outer_headers);
		headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
	}
	/* This helper also serves VXLAN-GPE; pick the matching UDP port. */
	dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
		MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
	/*
	 * Enforce the well-known tunnel port only when no preceding UDP
	 * item already set a destination port in the key.
	 */
	if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
		MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
		MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
	}
	if (!vxlan_v)
		return;
	if (!vxlan_m)
		vxlan_m = &rte_flow_item_vxlan_mask;
	size = sizeof(vxlan_m->vni);
	vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
	vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
	memcpy(vni_m, vxlan_m->vni, size);
	/* The value must be in the range of the mask. */
	for (i = 0; i < size; ++i)
		vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
3423
3424 /**
3425  * Add MPLS item to matcher and to the value.
3426  *
3427  * @param[in, out] matcher
3428  *   Flow matcher.
3429  * @param[in, out] key
3430  *   Flow matcher value.
3431  * @param[in] item
3432  *   Flow pattern to translate.
3433  * @param[in] prev_layer
3434  *   The protocol layer indicated in previous item.
3435  * @param[in] inner
3436  *   Item is inner pattern.
3437  */
3438 static void
3439 flow_dv_translate_item_mpls(void *matcher, void *key,
3440                             const struct rte_flow_item *item,
3441                             uint64_t prev_layer,
3442                             int inner)
3443 {
3444         const uint32_t *in_mpls_m = item->mask;
3445         const uint32_t *in_mpls_v = item->spec;
3446         uint32_t *out_mpls_m = 0;
3447         uint32_t *out_mpls_v = 0;
3448         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3449         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3450         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
3451                                      misc_parameters_2);
3452         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
3453         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
3454         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3455
3456         switch (prev_layer) {
3457         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3458                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
3459                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
3460                          MLX5_UDP_PORT_MPLS);
3461                 break;
3462         case MLX5_FLOW_LAYER_GRE:
3463                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
3464                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
3465                          RTE_ETHER_TYPE_MPLS);
3466                 break;
3467         default:
3468                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
3469                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
3470                          IPPROTO_MPLS);
3471                 break;
3472         }
3473         if (!in_mpls_v)
3474                 return;
3475         if (!in_mpls_m)
3476                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
3477         switch (prev_layer) {
3478         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
3479                 out_mpls_m =
3480                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3481                                                  outer_first_mpls_over_udp);
3482                 out_mpls_v =
3483                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3484                                                  outer_first_mpls_over_udp);
3485                 break;
3486         case MLX5_FLOW_LAYER_GRE:
3487                 out_mpls_m =
3488                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
3489                                                  outer_first_mpls_over_gre);
3490                 out_mpls_v =
3491                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
3492                                                  outer_first_mpls_over_gre);
3493                 break;
3494         default:
3495                 /* Inner MPLS not over GRE is not supported. */
3496                 if (!inner) {
3497                         out_mpls_m =
3498                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3499                                                          misc2_m,
3500                                                          outer_first_mpls);
3501                         out_mpls_v =
3502                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
3503                                                          misc2_v,
3504                                                          outer_first_mpls);
3505                 }
3506                 break;
3507         }
3508         if (out_mpls_m && out_mpls_v) {
3509                 *out_mpls_m = *in_mpls_m;
3510                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
3511         }
3512 }
3513
/**
 * Add META item to matcher and to the value.
 *
 * Matches on metadata register A (misc parameters 2). Without a spec the
 * item is a no-op.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(void *matcher, void *key,
                            const struct rte_flow_item *item)
{
        const struct rte_flow_item_meta *meta_m;
        const struct rte_flow_item_meta *meta_v;
        void *misc2_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
        void *misc2_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);

        meta_m = (const void *)item->mask;
        /* A missing mask means "match the whole metadata value". */
        if (!meta_m)
                meta_m = &rte_flow_item_meta_mask;
        meta_v = (const void *)item->spec;
        if (meta_v) {
                /* Item carries big-endian data; register expects CPU order. */
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
                         rte_be_to_cpu_32(meta_m->data));
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
                         rte_be_to_cpu_32(meta_v->data & meta_m->data));
        }
}
3548
3549 /**
3550  * Add source vport match to the specified matcher.
3551  *
3552  * @param[in, out] matcher
3553  *   Flow matcher.
3554  * @param[in, out] key
3555  *   Flow matcher value.
3556  * @param[in] port
3557  *   Source vport value to match
3558  * @param[in] mask
3559  *   Mask
3560  */
3561 static void
3562 flow_dv_translate_item_source_vport(void *matcher, void *key,
3563                                     int16_t port, uint16_t mask)
3564 {
3565         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3566         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3567
3568         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3569         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3570 }
3571
3572 /**
3573  * Translate port-id item to eswitch match on  port-id.
3574  *
3575  * @param[in] dev
3576  *   The devich to configure through.
3577  * @param[in, out] matcher
3578  *   Flow matcher.
3579  * @param[in, out] key
3580  *   Flow matcher value.
3581  * @param[in] item
3582  *   Flow pattern to translate.
3583  *
3584  * @return
3585  *   0 on success, a negative errno value otherwise.
3586  */
3587 static int
3588 flow_dv_translate_item_port_id(struct rte_eth_dev *dev, void *matcher,
3589                                void *key, const struct rte_flow_item *item)
3590 {
3591         const struct rte_flow_item_port_id *pid_m = item ? item->mask : NULL;
3592         const struct rte_flow_item_port_id *pid_v = item ? item->spec : NULL;
3593         uint16_t mask, val, id;
3594         int ret;
3595
3596         mask = pid_m ? pid_m->id : 0xffff;
3597         id = pid_v ? pid_v->id : dev->data->port_id;
3598         ret = mlx5_port_to_eswitch_info(id, NULL, &val);
3599         if (ret)
3600                 return ret;
3601         flow_dv_translate_item_source_vport(matcher, key, val, mask);
3602         return 0;
3603 }
3604
3605 /**
3606  * Add ICMP6 item to matcher and to the value.
3607  *
3608  * @param[in, out] matcher
3609  *   Flow matcher.
3610  * @param[in, out] key
3611  *   Flow matcher value.
3612  * @param[in] item
3613  *   Flow pattern to translate.
3614  * @param[in] inner
3615  *   Item is inner pattern.
3616  */
3617 static void
3618 flow_dv_translate_item_icmp6(void *matcher, void *key,
3619                               const struct rte_flow_item *item,
3620                               int inner)
3621 {
3622         const struct rte_flow_item_icmp6 *icmp6_m = item->mask;
3623         const struct rte_flow_item_icmp6 *icmp6_v = item->spec;
3624         void *headers_m;
3625         void *headers_v;
3626         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3627                                      misc_parameters_3);
3628         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3629         if (inner) {
3630                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3631                                          inner_headers);
3632                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3633         } else {
3634                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3635                                          outer_headers);
3636                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3637         }
3638         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3639         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMPV6);
3640         if (!icmp6_v)
3641                 return;
3642         if (!icmp6_m)
3643                 icmp6_m = &rte_flow_item_icmp6_mask;
3644         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_type, icmp6_m->type);
3645         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_type,
3646                  icmp6_v->type & icmp6_m->type);
3647         MLX5_SET(fte_match_set_misc3, misc3_m, icmpv6_code, icmp6_m->code);
3648         MLX5_SET(fte_match_set_misc3, misc3_v, icmpv6_code,
3649                  icmp6_v->code & icmp6_m->code);
3650 }
3651
3652 /**
3653  * Add ICMP item to matcher and to the value.
3654  *
3655  * @param[in, out] matcher
3656  *   Flow matcher.
3657  * @param[in, out] key
3658  *   Flow matcher value.
3659  * @param[in] item
3660  *   Flow pattern to translate.
3661  * @param[in] inner
3662  *   Item is inner pattern.
3663  */
3664 static void
3665 flow_dv_translate_item_icmp(void *matcher, void *key,
3666                             const struct rte_flow_item *item,
3667                             int inner)
3668 {
3669         const struct rte_flow_item_icmp *icmp_m = item->mask;
3670         const struct rte_flow_item_icmp *icmp_v = item->spec;
3671         void *headers_m;
3672         void *headers_v;
3673         void *misc3_m = MLX5_ADDR_OF(fte_match_param, matcher,
3674                                      misc_parameters_3);
3675         void *misc3_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_3);
3676         if (inner) {
3677                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3678                                          inner_headers);
3679                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
3680         } else {
3681                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
3682                                          outer_headers);
3683                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
3684         }
3685         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xFF);
3686         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_ICMP);
3687         if (!icmp_v)
3688                 return;
3689         if (!icmp_m)
3690                 icmp_m = &rte_flow_item_icmp_mask;
3691         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_type,
3692                  icmp_m->hdr.icmp_type);
3693         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_type,
3694                  icmp_v->hdr.icmp_type & icmp_m->hdr.icmp_type);
3695         MLX5_SET(fte_match_set_misc3, misc3_m, icmp_code,
3696                  icmp_m->hdr.icmp_code);
3697         MLX5_SET(fte_match_set_misc3, misc3_v, icmp_code,
3698                  icmp_v->hdr.icmp_code & icmp_m->hdr.icmp_code);
3699 }
3700
3701 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
3702
3703 #define HEADER_IS_ZERO(match_criteria, headers)                              \
3704         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
3705                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
3706
3707 /**
3708  * Calculate flow matcher enable bitmap.
3709  *
3710  * @param match_criteria
3711  *   Pointer to flow matcher criteria.
3712  *
3713  * @return
3714  *   Bitmap of enabled fields.
3715  */
3716 static uint8_t
3717 flow_dv_matcher_enable(uint32_t *match_criteria)
3718 {
3719         uint8_t match_criteria_enable;
3720
3721         match_criteria_enable =
3722                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
3723                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
3724         match_criteria_enable |=
3725                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
3726                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
3727         match_criteria_enable |=
3728                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
3729                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
3730         match_criteria_enable |=
3731                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
3732                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
3733 #ifdef HAVE_MLX5DV_DR
3734         match_criteria_enable |=
3735                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
3736                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
3737 #endif
3738         return match_criteria_enable;
3739 }
3740
3741
3742 /**
3743  * Get a flow table.
3744  *
3745  * @param dev[in, out]
3746  *   Pointer to rte_eth_dev structure.
3747  * @param[in] table_id
3748  *   Table id to use.
3749  * @param[in] egress
3750  *   Direction of the table.
3751  * @param[in] transfer
3752  *   E-Switch or NIC flow.
3753  * @param[out] error
3754  *   pointer to error structure.
3755  *
3756  * @return
3757  *   Returns tables resource based on the index, NULL in case of failed.
3758  */
static struct mlx5_flow_tbl_resource *
flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
                         uint32_t table_id, uint8_t egress,
                         uint8_t transfer,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_tbl_resource *tbl;

#ifdef HAVE_MLX5DV_DR
        /*
         * Pick the table array by direction (FDB > egress > ingress) and
         * create the underlying DR table object lazily on first use.
         */
        if (transfer) {
                tbl = &sh->fdb_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
                                (sh->fdb_domain, table_id);
        } else if (egress) {
                tbl = &sh->tx_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
                                (sh->tx_domain, table_id);
        } else {
                tbl = &sh->rx_tbl[table_id];
                if (!tbl->obj)
                        tbl->obj = mlx5_glue->dr_create_flow_tbl
                                (sh->rx_domain, table_id);
        }
        if (!tbl->obj) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                   NULL, "cannot create table");
                return NULL;
        }
        /* Each successful get takes a reference, dropped by _release(). */
        rte_atomic32_inc(&tbl->refcnt);
        return tbl;
#else
        /* Without DR there is no table object nor refcounting to manage. */
        (void)error;
        (void)tbl;
        if (transfer)
                return &sh->fdb_tbl[table_id];
        else if (egress)
                return &sh->tx_tbl[table_id];
        else
                return &sh->rx_tbl[table_id];
#endif
}
3805
3806 /**
3807  * Release a flow table.
3808  *
3809  * @param[in] tbl
3810  *   Table resource to be released.
3811  *
3812  * @return
3813  *   Returns 0 if table was released, else return 1;
3814  */
3815 static int
3816 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
3817 {
3818         if (!tbl)
3819                 return 0;
3820         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
3821                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
3822                 tbl->obj = NULL;
3823                 return 0;
3824         }
3825         return 1;
3826 }
3827
3828 /**
3829  * Register the flow matcher.
3830  *
3831  * @param dev[in, out]
3832  *   Pointer to rte_eth_dev structure.
3833  * @param[in, out] matcher
3834  *   Pointer to flow matcher.
3835  * @parm[in, out] dev_flow
3836  *   Pointer to the dev_flow.
3837  * @param[out] error
3838  *   pointer to error structure.
3839  *
3840  * @return
3841  *   0 on success otherwise -errno and errno is set.
3842  */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };
        struct mlx5_flow_tbl_resource *tbl = NULL;

        /* Lookup from cache: identical matchers are shared and refcounted. */
        LIST_FOREACH(cache_matcher, &sh->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    matcher->group == cache_matcher->group &&
                    matcher->transfer == cache_matcher->transfer &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        /* The matcher lives in the table of its (scaled) group. */
        tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
                                       matcher->egress, matcher->transfer,
                                       error);
        if (!tbl) {
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create table");
        }
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
        if (!cache_matcher->matcher_object) {
                rte_free(cache_matcher);
#ifdef HAVE_MLX5DV_DR
                /* Roll back the table reference taken above. */
                flow_dv_tbl_resource_release(tbl);
#endif
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        rte_atomic32_inc(&tbl->refcnt);
        return 0;
}
3921
3922 /**
3923  * Find existing tag resource or create and register a new one.
3924  *
3925  * @param dev[in, out]
3926  *   Pointer to rte_eth_dev structure.
3927  * @param[in, out] resource
3928  *   Pointer to tag resource.
3929  * @parm[in, out] dev_flow
3930  *   Pointer to the dev_flow.
3931  * @param[out] error
3932  *   pointer to error structure.
3933  *
3934  * @return
3935  *   0 on success otherwise -errno and errno is set.
3936  */
static int
flow_dv_tag_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_tag_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_dv_tag_resource *cache_resource;

        /* Lookup a matching resource from cache: tags are shared by value. */
        LIST_FOREACH(cache_resource, &sh->tags, next) {
                if (resource->tag == cache_resource->tag) {
                        DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->flow->tag_resource = cache_resource;
                        return 0;
                }
        }
        /* Register a new resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->action = mlx5_glue->dv_create_flow_action_tag
                (resource->tag);
        if (!cache_resource->action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
        dev_flow->flow->tag_resource = cache_resource;
        DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}
3983
3984 /**
3985  * Release the tag.
3986  *
3987  * @param dev
3988  *   Pointer to Ethernet device.
3989  * @param flow
3990  *   Pointer to mlx5_flow.
3991  *
3992  * @return
3993  *   1 while a reference on it exists, 0 when freed.
3994  */
3995 static int
3996 flow_dv_tag_release(struct rte_eth_dev *dev,
3997                     struct mlx5_flow_dv_tag_resource *tag)
3998 {
3999         assert(tag);
4000         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
4001                 dev->data->port_id, (void *)tag,
4002                 rte_atomic32_read(&tag->refcnt));
4003         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
4004                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
4005                 LIST_REMOVE(tag, next);
4006                 DRV_LOG(DEBUG, "port %u tag %p: removed",
4007                         dev->data->port_id, (void *)tag);
4008                 rte_free(tag);
4009                 return 0;
4010         }
4011         return 1;
4012 }
4013
4014 /**
4015  * Translate port ID action to vport.
4016  *
4017  * @param[in] dev
4018  *   Pointer to rte_eth_dev structure.
4019  * @param[in] action
4020  *   Pointer to the port ID action.
4021  * @param[out] dst_port_id
4022  *   The target port ID.
4023  * @param[out] error
4024  *   Pointer to the error structure.
4025  *
4026  * @return
4027  *   0 on success, a negative errno value otherwise and rte_errno is set.
4028  */
4029 static int
4030 flow_dv_translate_action_port_id(struct rte_eth_dev *dev,
4031                                  const struct rte_flow_action *action,
4032                                  uint32_t *dst_port_id,
4033                                  struct rte_flow_error *error)
4034 {
4035         uint32_t port;
4036         uint16_t port_id;
4037         int ret;
4038         const struct rte_flow_action_port_id *conf =
4039                         (const struct rte_flow_action_port_id *)action->conf;
4040
4041         port = conf->original ? dev->data->port_id : conf->id;
4042         ret = mlx5_port_to_eswitch_info(port, NULL, &port_id);
4043         if (ret)
4044                 return rte_flow_error_set(error, -ret,
4045                                           RTE_FLOW_ERROR_TYPE_ACTION,
4046                                           NULL,
4047                                           "No eswitch info was found for port");
4048         *dst_port_id = port_id;
4049         return 0;
4050 }
4051
4052 /**
4053  * Fill the flow with DV spec.
4054  *
4055  * @param[in] dev
4056  *   Pointer to rte_eth_dev structure.
4057  * @param[in, out] dev_flow
4058  *   Pointer to the sub flow.
4059  * @param[in] attr
4060  *   Pointer to the flow attributes.
4061  * @param[in] items
4062  *   Pointer to the list of items.
4063  * @param[in] actions
4064  *   Pointer to the list of actions.
4065  * @param[out] error
4066  *   Pointer to the error structure.
4067  *
4068  * @return
4069  *   0 on success, a negative errno value otherwise and rte_errno is set.
4070  */
4071 static int
4072 flow_dv_translate(struct rte_eth_dev *dev,
4073                   struct mlx5_flow *dev_flow,
4074                   const struct rte_flow_attr *attr,
4075                   const struct rte_flow_item items[],
4076                   const struct rte_flow_action actions[],
4077                   struct rte_flow_error *error)
4078 {
4079         struct mlx5_priv *priv = dev->data->dev_private;
4080         struct rte_flow *flow = dev_flow->flow;
4081         uint64_t item_flags = 0;
4082         uint64_t last_item = 0;
4083         uint64_t action_flags = 0;
4084         uint64_t priority = attr->priority;
4085         struct mlx5_flow_dv_matcher matcher = {
4086                 .mask = {
4087                         .size = sizeof(matcher.mask.buf),
4088                 },
4089         };
4090         int actions_n = 0;
4091         bool actions_end = false;
4092         struct mlx5_flow_dv_modify_hdr_resource res = {
4093                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
4094                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
4095         };
4096         union flow_dv_attr flow_attr = { .attr = 0 };
4097         struct mlx5_flow_dv_tag_resource tag_resource;
4098         uint32_t modify_action_position = UINT32_MAX;
4099         void *match_mask = matcher.mask.buf;
4100         void *match_value = dev_flow->dv.value.buf;
4101
4102         flow->group = attr->group;
4103         if (attr->transfer)
4104                 res.ft_type = MLX5DV_FLOW_TABLE_TYPE_FDB;
4105         if (priority == MLX5_FLOW_PRIO_RSVD)
4106                 priority = priv->config.flow_prio - 1;
4107         for (; !actions_end ; actions++) {
4108                 const struct rte_flow_action_queue *queue;
4109                 const struct rte_flow_action_rss *rss;
4110                 const struct rte_flow_action *action = actions;
4111                 const struct rte_flow_action_count *count = action->conf;
4112                 const uint8_t *rss_key;
4113                 const struct rte_flow_action_jump *jump_data;
4114                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
4115                 struct mlx5_flow_tbl_resource *tbl;
4116                 uint32_t port_id = 0;
4117                 struct mlx5_flow_dv_port_id_action_resource port_id_resource;
4118
4119                 switch (actions->type) {
4120                 case RTE_FLOW_ACTION_TYPE_VOID:
4121                         break;
4122                 case RTE_FLOW_ACTION_TYPE_PORT_ID:
4123                         if (flow_dv_translate_action_port_id(dev, action,
4124                                                              &port_id, error))
4125                                 return -rte_errno;
4126                         port_id_resource.port_id = port_id;
4127                         if (flow_dv_port_id_action_resource_register
4128                             (dev, &port_id_resource, dev_flow, error))
4129                                 return -rte_errno;
4130                         dev_flow->dv.actions[actions_n++] =
4131                                 dev_flow->dv.port_id_action->action;
4132                         action_flags |= MLX5_FLOW_ACTION_PORT_ID;
4133                         break;
4134                 case RTE_FLOW_ACTION_TYPE_FLAG:
4135                         tag_resource.tag =
4136                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
4137                         if (!flow->tag_resource)
4138                                 if (flow_dv_tag_resource_register
4139                                     (dev, &tag_resource, dev_flow, error))
4140                                         return errno;
4141                         dev_flow->dv.actions[actions_n++] =
4142                                 flow->tag_resource->action;
4143                         action_flags |= MLX5_FLOW_ACTION_FLAG;
4144                         break;
4145                 case RTE_FLOW_ACTION_TYPE_MARK:
4146                         tag_resource.tag = mlx5_flow_mark_set
4147                               (((const struct rte_flow_action_mark *)
4148                                (actions->conf))->id);
4149                         if (!flow->tag_resource)
4150                                 if (flow_dv_tag_resource_register
4151                                     (dev, &tag_resource, dev_flow, error))
4152                                         return errno;
4153                         dev_flow->dv.actions[actions_n++] =
4154                                 flow->tag_resource->action;
4155                         action_flags |= MLX5_FLOW_ACTION_MARK;
4156                         break;
4157                 case RTE_FLOW_ACTION_TYPE_DROP:
4158                         action_flags |= MLX5_FLOW_ACTION_DROP;
4159                         break;
4160                 case RTE_FLOW_ACTION_TYPE_QUEUE:
4161                         queue = actions->conf;
4162                         flow->rss.queue_num = 1;
4163                         (*flow->queue)[0] = queue->index;
4164                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
4165                         break;
4166                 case RTE_FLOW_ACTION_TYPE_RSS:
4167                         rss = actions->conf;
4168                         if (flow->queue)
4169                                 memcpy((*flow->queue), rss->queue,
4170                                        rss->queue_num * sizeof(uint16_t));
4171                         flow->rss.queue_num = rss->queue_num;
4172                         /* NULL RSS key indicates default RSS key. */
4173                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
4174                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
4175                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
4176                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
4177                         flow->rss.level = rss->level;
4178                         action_flags |= MLX5_FLOW_ACTION_RSS;
4179                         break;
4180                 case RTE_FLOW_ACTION_TYPE_COUNT:
4181                         if (!priv->config.devx) {
4182                                 rte_errno = ENOTSUP;
4183                                 goto cnt_err;
4184                         }
4185                         flow->counter = flow_dv_counter_new(dev, count->shared,
4186                                                             count->id);
4187                         if (flow->counter == NULL)
4188                                 goto cnt_err;
4189                         dev_flow->dv.actions[actions_n++] =
4190                                 flow->counter->action;
4191                         action_flags |= MLX5_FLOW_ACTION_COUNT;
4192                         break;
4193 cnt_err:
4194                         if (rte_errno == ENOTSUP)
4195                                 return rte_flow_error_set
4196                                               (error, ENOTSUP,
4197                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4198                                                NULL,
4199                                                "count action not supported");
4200                         else
4201                                 return rte_flow_error_set
4202                                                 (error, rte_errno,
4203                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4204                                                  action,
4205                                                  "cannot create counter"
4206                                                   " object.");
4207                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
4208                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
4209                         if (flow_dv_create_action_l2_encap(dev, actions,
4210                                                            dev_flow,
4211                                                            attr->transfer,
4212                                                            error))
4213                                 return -rte_errno;
4214                         dev_flow->dv.actions[actions_n++] =
4215                                 dev_flow->dv.encap_decap->verbs_action;
4216                         action_flags |= actions->type ==
4217                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
4218                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
4219                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
4220                         break;
4221                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
4222                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
4223                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
4224                                                            attr->transfer,
4225                                                            error))
4226                                 return -rte_errno;
4227                         dev_flow->dv.actions[actions_n++] =
4228                                 dev_flow->dv.encap_decap->verbs_action;
4229                         action_flags |= actions->type ==
4230                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
4231                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
4232                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
4233                         break;
4234                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
4235                         /* Handle encap with preceding decap. */
4236                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
4237                                 if (flow_dv_create_action_raw_encap
4238                                         (dev, actions, dev_flow, attr, error))
4239                                         return -rte_errno;
4240                                 dev_flow->dv.actions[actions_n++] =
4241                                         dev_flow->dv.encap_decap->verbs_action;
4242                         } else {
4243                                 /* Handle encap without preceding decap. */
4244                                 if (flow_dv_create_action_l2_encap
4245                                     (dev, actions, dev_flow, attr->transfer,
4246                                      error))
4247                                         return -rte_errno;
4248                                 dev_flow->dv.actions[actions_n++] =
4249                                         dev_flow->dv.encap_decap->verbs_action;
4250                         }
4251                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
4252                         break;
4253                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
4254                         /* Check if this decap is followed by encap. */
4255                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
4256                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
4257                                action++) {
4258                         }
4259                         /* Handle decap only if it isn't followed by encap. */
4260                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
4261                                 if (flow_dv_create_action_l2_decap
4262                                     (dev, dev_flow, attr->transfer, error))
4263                                         return -rte_errno;
4264                                 dev_flow->dv.actions[actions_n++] =
4265                                         dev_flow->dv.encap_decap->verbs_action;
4266                         }
4267                         /* If decap is followed by encap, handle it at encap. */
4268                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
4269                         break;
4270                 case RTE_FLOW_ACTION_TYPE_JUMP:
4271                         jump_data = action->conf;
4272                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
4273                                                        MLX5_GROUP_FACTOR,
4274                                                        attr->egress,
4275                                                        attr->transfer, error);
4276                         if (!tbl)
4277                                 return rte_flow_error_set
4278                                                 (error, errno,
4279                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4280                                                  NULL,
4281                                                  "cannot create jump action.");
4282                         jump_tbl_resource.tbl = tbl;
4283                         if (flow_dv_jump_tbl_resource_register
4284                             (dev, &jump_tbl_resource, dev_flow, error)) {
4285                                 flow_dv_tbl_resource_release(tbl);
4286                                 return rte_flow_error_set
4287                                                 (error, errno,
4288                                                  RTE_FLOW_ERROR_TYPE_ACTION,
4289                                                  NULL,
4290                                                  "cannot create jump action.");
4291                         }
4292                         dev_flow->dv.actions[actions_n++] =
4293                                 dev_flow->dv.jump->action;
4294                         action_flags |= MLX5_FLOW_ACTION_JUMP;
4295                         break;
4296                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
4297                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
4298                         if (flow_dv_convert_action_modify_mac(&res, actions,
4299                                                               error))
4300                                 return -rte_errno;
4301                         action_flags |= actions->type ==
4302                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
4303                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
4304                                         MLX5_FLOW_ACTION_SET_MAC_DST;
4305                         break;
4306                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
4307                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
4308                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
4309                                                                error))
4310                                 return -rte_errno;
4311                         action_flags |= actions->type ==
4312                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
4313                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
4314                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
4315                         break;
4316                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
4317                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
4318                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
4319                                                                error))
4320                                 return -rte_errno;
4321                         action_flags |= actions->type ==
4322                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
4323                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
4324                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
4325                         break;
4326                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
4327                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
4328                         if (flow_dv_convert_action_modify_tp(&res, actions,
4329                                                              items, &flow_attr,
4330                                                              error))
4331                                 return -rte_errno;
4332                         action_flags |= actions->type ==
4333                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
4334                                         MLX5_FLOW_ACTION_SET_TP_SRC :
4335                                         MLX5_FLOW_ACTION_SET_TP_DST;
4336                         break;
4337                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
4338                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
4339                                                                   &flow_attr,
4340                                                                   error))
4341                                 return -rte_errno;
4342                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
4343                         break;
4344                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
4345                         if (flow_dv_convert_action_modify_ttl(&res, actions,
4346                                                              items, &flow_attr,
4347                                                              error))
4348                                 return -rte_errno;
4349                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
4350                         break;
4351                 case RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ:
4352                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ:
4353                         if (flow_dv_convert_action_modify_tcp_seq(&res, actions,
4354                                                                   error))
4355                                 return -rte_errno;
4356                         action_flags |= actions->type ==
4357                                         RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ ?
4358                                         MLX5_FLOW_ACTION_INC_TCP_SEQ :
4359                                         MLX5_FLOW_ACTION_DEC_TCP_SEQ;
4360                         break;
4361
4362                 case RTE_FLOW_ACTION_TYPE_INC_TCP_ACK:
4363                 case RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK:
4364                         if (flow_dv_convert_action_modify_tcp_ack(&res, actions,
4365                                                                   error))
4366                                 return -rte_errno;
4367                         action_flags |= actions->type ==
4368                                         RTE_FLOW_ACTION_TYPE_INC_TCP_ACK ?
4369                                         MLX5_FLOW_ACTION_INC_TCP_ACK :
4370                                         MLX5_FLOW_ACTION_DEC_TCP_ACK;
4371                         break;
4372                 case RTE_FLOW_ACTION_TYPE_END:
4373                         actions_end = true;
4374                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
4375                                 /* create modify action if needed. */
4376                                 if (flow_dv_modify_hdr_resource_register
4377                                                                 (dev, &res,
4378                                                                  dev_flow,
4379                                                                  error))
4380                                         return -rte_errno;
4381                                 dev_flow->dv.actions[modify_action_position] =
4382                                         dev_flow->dv.modify_hdr->verbs_action;
4383                         }
4384                         break;
4385                 default:
4386                         break;
4387                 }
4388                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
4389                     modify_action_position == UINT32_MAX)
4390                         modify_action_position = actions_n++;
4391         }
4392         dev_flow->dv.actions_n = actions_n;
4393         flow->actions = action_flags;
4394         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
4395                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
4396
4397                 switch (items->type) {
4398                 case RTE_FLOW_ITEM_TYPE_PORT_ID:
4399                         flow_dv_translate_item_port_id(dev, match_mask,
4400                                                        match_value, items);
4401                         last_item = MLX5_FLOW_ITEM_PORT_ID;
4402                         break;
4403                 case RTE_FLOW_ITEM_TYPE_ETH:
4404                         flow_dv_translate_item_eth(match_mask, match_value,
4405                                                    items, tunnel);
4406                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4407                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
4408                                              MLX5_FLOW_LAYER_OUTER_L2;
4409                         break;
4410                 case RTE_FLOW_ITEM_TYPE_VLAN:
4411                         flow_dv_translate_item_vlan(match_mask, match_value,
4412                                                     items, tunnel);
4413                         matcher.priority = MLX5_PRIORITY_MAP_L2;
4414                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
4415                                               MLX5_FLOW_LAYER_INNER_VLAN) :
4416                                              (MLX5_FLOW_LAYER_OUTER_L2 |
4417                                               MLX5_FLOW_LAYER_OUTER_VLAN);
4418                         break;
4419                 case RTE_FLOW_ITEM_TYPE_IPV4:
4420                         flow_dv_translate_item_ipv4(match_mask, match_value,
4421                                                     items, tunnel, attr->group);
4422                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4423                         dev_flow->dv.hash_fields |=
4424                                 mlx5_flow_hashfields_adjust
4425                                         (dev_flow, tunnel,
4426                                          MLX5_IPV4_LAYER_TYPES,
4427                                          MLX5_IPV4_IBV_RX_HASH);
4428                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
4429                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
4430                         break;
4431                 case RTE_FLOW_ITEM_TYPE_IPV6:
4432                         flow_dv_translate_item_ipv6(match_mask, match_value,
4433                                                     items, tunnel, attr->group);
4434                         matcher.priority = MLX5_PRIORITY_MAP_L3;
4435                         dev_flow->dv.hash_fields |=
4436                                 mlx5_flow_hashfields_adjust
4437                                         (dev_flow, tunnel,
4438                                          MLX5_IPV6_LAYER_TYPES,
4439                                          MLX5_IPV6_IBV_RX_HASH);
4440                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
4441                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
4442                         break;
4443                 case RTE_FLOW_ITEM_TYPE_TCP:
4444                         flow_dv_translate_item_tcp(match_mask, match_value,
4445                                                    items, tunnel);
4446                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4447                         dev_flow->dv.hash_fields |=
4448                                 mlx5_flow_hashfields_adjust
4449                                         (dev_flow, tunnel, ETH_RSS_TCP,
4450                                          IBV_RX_HASH_SRC_PORT_TCP |
4451                                          IBV_RX_HASH_DST_PORT_TCP);
4452                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
4453                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
4454                         break;
4455                 case RTE_FLOW_ITEM_TYPE_UDP:
4456                         flow_dv_translate_item_udp(match_mask, match_value,
4457                                                    items, tunnel);
4458                         matcher.priority = MLX5_PRIORITY_MAP_L4;
4459                         dev_flow->dv.hash_fields |=
4460                                 mlx5_flow_hashfields_adjust
4461                                         (dev_flow, tunnel, ETH_RSS_UDP,
4462                                          IBV_RX_HASH_SRC_PORT_UDP |
4463                                          IBV_RX_HASH_DST_PORT_UDP);
4464                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
4465                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
4466                         break;
4467                 case RTE_FLOW_ITEM_TYPE_GRE:
4468                         flow_dv_translate_item_gre(match_mask, match_value,
4469                                                    items, tunnel);
4470                         last_item = MLX5_FLOW_LAYER_GRE;
4471                         break;
4472                 case RTE_FLOW_ITEM_TYPE_GRE_KEY:
4473                         flow_dv_translate_item_gre_key(match_mask,
4474                                                        match_value, items);
4475                         item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
4476                         break;
4477                 case RTE_FLOW_ITEM_TYPE_NVGRE:
4478                         flow_dv_translate_item_nvgre(match_mask, match_value,
4479                                                      items, tunnel);
4480                         last_item = MLX5_FLOW_LAYER_GRE;
4481                         break;
4482                 case RTE_FLOW_ITEM_TYPE_VXLAN:
4483                         flow_dv_translate_item_vxlan(match_mask, match_value,
4484                                                      items, tunnel);
4485                         last_item = MLX5_FLOW_LAYER_VXLAN;
4486                         break;
4487                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
4488                         flow_dv_translate_item_vxlan(match_mask, match_value,
4489                                                      items, tunnel);
4490                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
4491                         break;
4492                 case RTE_FLOW_ITEM_TYPE_MPLS:
4493                         flow_dv_translate_item_mpls(match_mask, match_value,
4494                                                     items, last_item, tunnel);
4495                         last_item = MLX5_FLOW_LAYER_MPLS;
4496                         break;
4497                 case RTE_FLOW_ITEM_TYPE_META:
4498                         flow_dv_translate_item_meta(match_mask, match_value,
4499                                                     items);
4500                         last_item = MLX5_FLOW_ITEM_METADATA;
4501                         break;
4502                 case RTE_FLOW_ITEM_TYPE_ICMP:
4503                         flow_dv_translate_item_icmp(match_mask, match_value,
4504                                                     items, tunnel);
4505                         item_flags |= MLX5_FLOW_LAYER_ICMP;
4506                         break;
4507                 case RTE_FLOW_ITEM_TYPE_ICMP6:
4508                         flow_dv_translate_item_icmp6(match_mask, match_value,
4509                                                       items, tunnel);
4510                         item_flags |= MLX5_FLOW_LAYER_ICMP6;
4511                         break;
4512                 default:
4513                         break;
4514                 }
4515                 item_flags |= last_item;
4516         }
4517         /*
4518          * In case of ingress traffic when E-Switch mode is enabled,
4519          * we have two cases where we need to set the source port manually.
4520          * The first one, is in case of Nic steering rule, and the second is
4521          * E-Switch rule where no port_id item was found. In both cases
4522          * the source port is set according the current port in use.
4523          */
4524         if ((attr->ingress && !(item_flags & MLX5_FLOW_ITEM_PORT_ID)) &&
4525             (priv->representor || priv->master)) {
4526                 if (flow_dv_translate_item_port_id(dev, match_mask,
4527                                                    match_value, NULL))
4528                         return -rte_errno;
4529         }
4530         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
4531                                          dev_flow->dv.value.buf));
4532         dev_flow->layers = item_flags;
4533         /* Register matcher. */
4534         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
4535                                     matcher.mask.size);
4536         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
4537                                                      matcher.priority);
4538         matcher.egress = attr->egress;
4539         matcher.group = attr->group;
4540         matcher.transfer = attr->transfer;
4541         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
4542                 return -rte_errno;
4543         return 0;
4544 }
4545
4546 /**
4547  * Apply the flow to the NIC.
4548  *
4549  * @param[in] dev
4550  *   Pointer to the Ethernet device structure.
4551  * @param[in, out] flow
4552  *   Pointer to flow structure.
4553  * @param[out] error
4554  *   Pointer to error structure.
4555  *
4556  * @return
4557  *   0 on success, a negative errno value otherwise and rte_errno is set.
4558  */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	struct mlx5_priv *priv = dev->data->dev_private;
	int n;
	int err;

	/*
	 * For every device sub-flow: append the fate action (drop or
	 * queue/RSS hash Rx queue) after the already-translated actions,
	 * then ask the glue layer to instantiate the rule in hardware.
	 */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			if (flow->transfer) {
				/* E-Switch rules reuse the shared drop action. */
				dv->actions[n++] = priv->sh->esw_drop_action;
			} else {
				/* NIC rules drop via a dedicated drop queue. */
				dv->hrxq = mlx5_hrxq_drop_new(dev);
				if (!dv->hrxq) {
					rte_flow_error_set
						(error, errno,
						 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
						 NULL,
						 "cannot get drop hash queue");
					goto error;
				}
				dv->actions[n++] = dv->hrxq->action;
			}
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			/* Reuse an existing hash Rx queue matching this RSS
			 * spec, or create a new one below. */
			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				/* Last argument requests inner-header hashing
				 * when the sub-flow matches a tunnel layer. */
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n++] = dv->hrxq->action;
		}
		/* Create the flow rule in hardware. */
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	/*
	 * Release the hash Rx queues acquired above for all sub-flows.
	 * NOTE(review): dv->flow objects already created are not destroyed
	 * here — presumably the caller tears them down via the destroy path
	 * on apply failure; confirm against the framework code.
	 */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
4641
4642 /**
4643  * Release the flow matcher.
4644  *
4645  * @param dev
4646  *   Pointer to Ethernet device.
4647  * @param flow
4648  *   Pointer to mlx5_flow.
4649  *
4650  * @return
4651  *   1 while a reference on it exists, 0 when freed.
4652  */
4653 static int
4654 flow_dv_matcher_release(struct rte_eth_dev *dev,
4655                         struct mlx5_flow *flow)
4656 {
4657         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
4658         struct mlx5_priv *priv = dev->data->dev_private;
4659         struct mlx5_ibv_shared *sh = priv->sh;
4660         struct mlx5_flow_tbl_resource *tbl;
4661
4662         assert(matcher->matcher_object);
4663         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
4664                 dev->data->port_id, (void *)matcher,
4665                 rte_atomic32_read(&matcher->refcnt));
4666         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
4667                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
4668                            (matcher->matcher_object));
4669                 LIST_REMOVE(matcher, next);
4670                 if (matcher->egress)
4671                         tbl = &sh->tx_tbl[matcher->group];
4672                 else
4673                         tbl = &sh->rx_tbl[matcher->group];
4674                 flow_dv_tbl_resource_release(tbl);
4675                 rte_free(matcher);
4676                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
4677                         dev->data->port_id, (void *)matcher);
4678                 return 0;
4679         }
4680         return 1;
4681 }
4682
4683 /**
4684  * Release an encap/decap resource.
4685  *
4686  * @param flow
4687  *   Pointer to mlx5_flow.
4688  *
4689  * @return
4690  *   1 while a reference on it exists, 0 when freed.
4691  */
4692 static int
4693 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
4694 {
4695         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
4696                                                 flow->dv.encap_decap;
4697
4698         assert(cache_resource->verbs_action);
4699         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
4700                 (void *)cache_resource,
4701                 rte_atomic32_read(&cache_resource->refcnt));
4702         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4703                 claim_zero(mlx5_glue->destroy_flow_action
4704                                 (cache_resource->verbs_action));
4705                 LIST_REMOVE(cache_resource, next);
4706                 rte_free(cache_resource);
4707                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
4708                         (void *)cache_resource);
4709                 return 0;
4710         }
4711         return 1;
4712 }
4713
4714 /**
4715  * Release a jump to table action resource.
4716  *
4717  * @param flow
4718  *   Pointer to mlx5_flow.
4719  *
4720  * @return
4721  *   1 while a reference on it exists, 0 when freed.
4722  */
4723 static int
4724 flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
4725 {
4726         struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
4727                                                 flow->dv.jump;
4728
4729         assert(cache_resource->action);
4730         DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
4731                 (void *)cache_resource,
4732                 rte_atomic32_read(&cache_resource->refcnt));
4733         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4734                 claim_zero(mlx5_glue->destroy_flow_action
4735                                 (cache_resource->action));
4736                 LIST_REMOVE(cache_resource, next);
4737                 flow_dv_tbl_resource_release(cache_resource->tbl);
4738                 rte_free(cache_resource);
4739                 DRV_LOG(DEBUG, "jump table resource %p: removed",
4740                         (void *)cache_resource);
4741                 return 0;
4742         }
4743         return 1;
4744 }
4745
4746 /**
4747  * Release a modify-header resource.
4748  *
4749  * @param flow
4750  *   Pointer to mlx5_flow.
4751  *
4752  * @return
4753  *   1 while a reference on it exists, 0 when freed.
4754  */
4755 static int
4756 flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
4757 {
4758         struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
4759                                                 flow->dv.modify_hdr;
4760
4761         assert(cache_resource->verbs_action);
4762         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
4763                 (void *)cache_resource,
4764                 rte_atomic32_read(&cache_resource->refcnt));
4765         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4766                 claim_zero(mlx5_glue->destroy_flow_action
4767                                 (cache_resource->verbs_action));
4768                 LIST_REMOVE(cache_resource, next);
4769                 rte_free(cache_resource);
4770                 DRV_LOG(DEBUG, "modify-header resource %p: removed",
4771                         (void *)cache_resource);
4772                 return 0;
4773         }
4774         return 1;
4775 }
4776
4777 /**
4778  * Release port ID action resource.
4779  *
4780  * @param flow
4781  *   Pointer to mlx5_flow.
4782  *
4783  * @return
4784  *   1 while a reference on it exists, 0 when freed.
4785  */
4786 static int
4787 flow_dv_port_id_action_resource_release(struct mlx5_flow *flow)
4788 {
4789         struct mlx5_flow_dv_port_id_action_resource *cache_resource =
4790                 flow->dv.port_id_action;
4791
4792         assert(cache_resource->action);
4793         DRV_LOG(DEBUG, "port ID action resource %p: refcnt %d--",
4794                 (void *)cache_resource,
4795                 rte_atomic32_read(&cache_resource->refcnt));
4796         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
4797                 claim_zero(mlx5_glue->destroy_flow_action
4798                                 (cache_resource->action));
4799                 LIST_REMOVE(cache_resource, next);
4800                 rte_free(cache_resource);
4801                 DRV_LOG(DEBUG, "port id action resource %p: removed",
4802                         (void *)cache_resource);
4803                 return 0;
4804         }
4805         return 1;
4806 }
4807
4808 /**
4809  * Remove the flow from the NIC but keeps it in memory.
4810  *
4811  * @param[in] dev
4812  *   Pointer to Ethernet device.
4813  * @param[in, out] flow
4814  *   Pointer to flow structure.
4815  */
4816 static void
4817 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
4818 {
4819         struct mlx5_flow_dv *dv;
4820         struct mlx5_flow *dev_flow;
4821
4822         if (!flow)
4823                 return;
4824         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
4825                 dv = &dev_flow->dv;
4826                 if (dv->flow) {
4827                         claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
4828                         dv->flow = NULL;
4829                 }
4830                 if (dv->hrxq) {
4831                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
4832                                 mlx5_hrxq_drop_release(dev);
4833                         else
4834                                 mlx5_hrxq_release(dev, dv->hrxq);
4835                         dv->hrxq = NULL;
4836                 }
4837         }
4838 }
4839
4840 /**
4841  * Remove the flow from the NIC and the memory.
4842  *
4843  * @param[in] dev
4844  *   Pointer to the Ethernet device structure.
4845  * @param[in, out] flow
4846  *   Pointer to flow structure.
4847  */
4848 static void
4849 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
4850 {
4851         struct mlx5_flow *dev_flow;
4852
4853         if (!flow)
4854                 return;
4855         flow_dv_remove(dev, flow);
4856         if (flow->counter) {
4857                 flow_dv_counter_release(flow->counter);
4858                 flow->counter = NULL;
4859         }
4860         if (flow->tag_resource) {
4861                 flow_dv_tag_release(dev, flow->tag_resource);
4862                 flow->tag_resource = NULL;
4863         }
4864         while (!LIST_EMPTY(&flow->dev_flows)) {
4865                 dev_flow = LIST_FIRST(&flow->dev_flows);
4866                 LIST_REMOVE(dev_flow, next);
4867                 if (dev_flow->dv.matcher)
4868                         flow_dv_matcher_release(dev, dev_flow);
4869                 if (dev_flow->dv.encap_decap)
4870                         flow_dv_encap_decap_resource_release(dev_flow);
4871                 if (dev_flow->dv.modify_hdr)
4872                         flow_dv_modify_hdr_resource_release(dev_flow);
4873                 if (dev_flow->dv.jump)
4874                         flow_dv_jump_tbl_resource_release(dev_flow);
4875                 if (dev_flow->dv.port_id_action)
4876                         flow_dv_port_id_action_resource_release(dev_flow);
4877                 rte_free(dev_flow);
4878         }
4879 }
4880
4881 /**
4882  * Query a dv flow  rule for its statistics via devx.
4883  *
4884  * @param[in] dev
4885  *   Pointer to Ethernet device.
4886  * @param[in] flow
4887  *   Pointer to the sub flow.
4888  * @param[out] data
4889  *   data retrieved by the query.
4890  * @param[out] error
4891  *   Perform verbose error reporting if not NULL.
4892  *
4893  * @return
4894  *   0 on success, a negative errno value otherwise and rte_errno is set.
4895  */
4896 static int
4897 flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
4898                     void *data, struct rte_flow_error *error)
4899 {
4900         struct mlx5_priv *priv = dev->data->dev_private;
4901         struct rte_flow_query_count *qc = data;
4902         uint64_t pkts = 0;
4903         uint64_t bytes = 0;
4904         int err;
4905
4906         if (!priv->config.devx)
4907                 return rte_flow_error_set(error, ENOTSUP,
4908                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4909                                           NULL,
4910                                           "counters are not supported");
4911         if (flow->counter) {
4912                 err = mlx5_devx_cmd_flow_counter_query
4913                                                 (flow->counter->dcs,
4914                                                  qc->reset, &pkts, &bytes);
4915                 if (err)
4916                         return rte_flow_error_set
4917                                 (error, err,
4918                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4919                                  NULL,
4920                                  "cannot read counters");
4921                 qc->hits_set = 1;
4922                 qc->bytes_set = 1;
4923                 qc->hits = pkts - flow->counter->hits;
4924                 qc->bytes = bytes - flow->counter->bytes;
4925                 if (qc->reset) {
4926                         flow->counter->hits = pkts;
4927                         flow->counter->bytes = bytes;
4928                 }
4929                 return 0;
4930         }
4931         return rte_flow_error_set(error, EINVAL,
4932                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4933                                   NULL,
4934                                   "counters are not available");
4935 }
4936
4937 /**
4938  * Query a flow.
4939  *
4940  * @see rte_flow_query()
4941  * @see rte_flow_ops
4942  */
4943 static int
4944 flow_dv_query(struct rte_eth_dev *dev,
4945               struct rte_flow *flow __rte_unused,
4946               const struct rte_flow_action *actions __rte_unused,
4947               void *data __rte_unused,
4948               struct rte_flow_error *error __rte_unused)
4949 {
4950         int ret = -EINVAL;
4951
4952         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
4953                 switch (actions->type) {
4954                 case RTE_FLOW_ACTION_TYPE_VOID:
4955                         break;
4956                 case RTE_FLOW_ACTION_TYPE_COUNT:
4957                         ret = flow_dv_query_count(dev, flow, data, error);
4958                         break;
4959                 default:
4960                         return rte_flow_error_set(error, ENOTSUP,
4961                                                   RTE_FLOW_ERROR_TYPE_ACTION,
4962                                                   actions,
4963                                                   "action not supported");
4964                 }
4965         }
4966         return ret;
4967 }
4968
4969 /*
4970  * Mutex-protected thunk to flow_dv_translate().
4971  */
4972 static int
4973 flow_d_translate(struct rte_eth_dev *dev,
4974                  struct mlx5_flow *dev_flow,
4975                  const struct rte_flow_attr *attr,
4976                  const struct rte_flow_item items[],
4977                  const struct rte_flow_action actions[],
4978                  struct rte_flow_error *error)
4979 {
4980         int ret;
4981
4982         flow_d_shared_lock(dev);
4983         ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
4984         flow_d_shared_unlock(dev);
4985         return ret;
4986 }
4987
/*
 * Mutex-protected thunk to flow_dv_apply().
 *
 * Serializes flow creation against other control-path operations that
 * touch shared DV resources.
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
	     struct rte_flow *flow,
	     struct rte_flow_error *error)
{
	int rc;

	flow_d_shared_lock(dev);
	rc = flow_dv_apply(dev, flow, error);
	flow_d_shared_unlock(dev);
	return rc;
}
5003
/*
 * Mutex-protected thunk to flow_dv_remove().
 *
 * Takes the shared driver lock around the removal so it cannot race
 * with other control-path operations on shared DV resources.
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_remove(dev, flow);
	flow_d_shared_unlock(dev);
}
5014
/*
 * Mutex-protected thunk to flow_dv_destroy().
 *
 * Takes the shared driver lock around the destruction so it cannot race
 * with other control-path operations on shared DV resources.
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	flow_d_shared_lock(dev);
	flow_dv_destroy(dev, flow);
	flow_d_shared_unlock(dev);
}
5025
/*
 * Flow driver callbacks for the DV (Direct Verbs) flow engine.
 * translate/apply/remove/destroy are wired through the mutex-protected
 * flow_d_* thunks; validate/prepare/query are wired directly.
 */
const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_d_translate,
	.apply = flow_d_apply,
	.remove = flow_d_remove,
	.destroy = flow_d_destroy,
	.query = flow_dv_query,
};
5035
5036 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */