net/mlx5: fix modify header action position
drivers/net/mlx5/mlx5_flow_dv.c (dpdk.git)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"
#include "mlx5_prm.h"
#include "mlx5_rxtx.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

#ifndef HAVE_IBV_FLOW_DEVX_COUNTERS
#define MLX5DV_FLOW_ACTION_COUNTERS_DEVX 0
#endif

union flow_dv_attr {
	struct {
		uint32_t valid:1;
		uint32_t ipv4:1;
		uint32_t ipv6:1;
		uint32_t tcp:1;
		uint32_t udp:1;
		uint32_t reserved:27;
	};
	uint32_t attr;
};

/**
 * Initialize flow attributes structure according to flow items' types.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[out] attr
 *   Pointer to flow attributes structure.
 */
static void
flow_dv_attr_init(const struct rte_flow_item *item, union flow_dv_attr *attr)
{
	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_IPV4:
			attr->ipv4 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			attr->ipv6 = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			attr->udp = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			attr->tcp = 1;
			break;
		default:
			break;
		}
	}
	attr->valid = 1;
}
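
/*
 * Usage sketch (illustrative only, not part of the driver): for a pattern
 * like eth / ipv4 / udp / end, the call below leaves attr.ipv4 == 1,
 * attr.udp == 1 and attr.valid == 1, which later selects the matching
 * modification table.
 *
 * @code
 * union flow_dv_attr attr = { .attr = 0 };
 *
 * flow_dv_attr_init(pattern, &attr);  // "pattern" is a hypothetical item list
 * struct field_modify_info *tbl = attr.udp ? modify_udp : modify_tcp;
 * @endcode
 */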

struct field_modify_info {
	uint32_t size; /* Size of field in protocol header, in bytes. */
	uint32_t offset; /* Offset of field in protocol header, in bytes. */
	enum mlx5_modification_field id;
};

struct field_modify_info modify_eth[] = {
	{4,  0, MLX5_MODI_OUT_DMAC_47_16},
	{2,  4, MLX5_MODI_OUT_DMAC_15_0},
	{4,  6, MLX5_MODI_OUT_SMAC_47_16},
	{2, 10, MLX5_MODI_OUT_SMAC_15_0},
	{0, 0, 0},
};

struct field_modify_info modify_ipv4[] = {
	{1,  8, MLX5_MODI_OUT_IPV4_TTL},
	{4, 12, MLX5_MODI_OUT_SIPV4},
	{4, 16, MLX5_MODI_OUT_DIPV4},
	{0, 0, 0},
};

struct field_modify_info modify_ipv6[] = {
	{1,  7, MLX5_MODI_OUT_IPV6_HOPLIMIT},
	{4,  8, MLX5_MODI_OUT_SIPV6_127_96},
	{4, 12, MLX5_MODI_OUT_SIPV6_95_64},
	{4, 16, MLX5_MODI_OUT_SIPV6_63_32},
	{4, 20, MLX5_MODI_OUT_SIPV6_31_0},
	{4, 24, MLX5_MODI_OUT_DIPV6_127_96},
	{4, 28, MLX5_MODI_OUT_DIPV6_95_64},
	{4, 32, MLX5_MODI_OUT_DIPV6_63_32},
	{4, 36, MLX5_MODI_OUT_DIPV6_31_0},
	{0, 0, 0},
};

struct field_modify_info modify_udp[] = {
	{2, 0, MLX5_MODI_OUT_UDP_SPORT},
	{2, 2, MLX5_MODI_OUT_UDP_DPORT},
	{0, 0, 0},
};

struct field_modify_info modify_tcp[] = {
	{2, 0, MLX5_MODI_OUT_TCP_SPORT},
	{2, 2, MLX5_MODI_OUT_TCP_DPORT},
	{0, 0, 0},
};
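
/*
 * Each table above walks one protocol header: an entry {size, offset, id}
 * says that "size" bytes at byte "offset" of the header map to hardware
 * modification field "id". Fields wider than 4 bytes are split across
 * entries, e.g. the 48-bit destination MAC is covered by DMAC_47_16
 * (4 bytes at offset 0) plus DMAC_15_0 (2 bytes at offset 4). A {0, 0, 0}
 * entry terminates the table.
 */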

/**
 * Acquire the synchronizing object to protect multithreaded access
 * to shared dv context. Lock occurs only if context is actually
 * shared, i.e. we have a multiport IB device and representors are
 * created.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 */
static void
flow_d_shared_lock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_lock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}

static void
flow_d_shared_unlock(struct rte_eth_dev *dev)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;

	if (sh->dv_refcnt > 1) {
		int ret;

		ret = pthread_mutex_unlock(&sh->dv_mutex);
		assert(!ret);
		(void)ret;
	}
}
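
/*
 * Usage sketch (illustrative only): callers bracket any mutation of shared
 * DV objects with the pair above, so the mutex is taken only when the IB
 * context is really shared between ports (dv_refcnt > 1).
 *
 * @code
 * flow_d_shared_lock(dev);
 * ret = mutate_shared_dv_objects(dev);  // hypothetical helper
 * flow_d_shared_unlock(dev);
 * @endcode
 */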

/**
 * Convert modify-header action to DV specification.
 *
 * @param[in] item
 *   Pointer to item specification.
 * @param[in] field
 *   Pointer to field modification information.
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] type
 *   Type of modification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_modify_action(struct rte_flow_item *item,
			      struct field_modify_info *field,
			      struct mlx5_flow_dv_modify_hdr_resource *resource,
			      uint32_t type,
			      struct rte_flow_error *error)
{
	uint32_t i = resource->actions_num;
	struct mlx5_modification_cmd *actions = resource->actions;
	const uint8_t *spec = item->spec;
	const uint8_t *mask = item->mask;
	uint32_t set;

	while (field->size) {
		set = 0;
		/* Generate modify command for each mask segment. */
		memcpy(&set, &mask[field->offset], field->size);
		if (set) {
			if (i >= MLX5_MODIFY_NUM)
				return rte_flow_error_set(error, EINVAL,
					 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					 "too many items to modify");
			actions[i].action_type = type;
			actions[i].field = field->id;
			actions[i].length = field->size ==
					4 ? 0 : field->size * 8;
			rte_memcpy(&actions[i].data[4 - field->size],
				   &spec[field->offset], field->size);
			actions[i].data0 = rte_cpu_to_be_32(actions[i].data0);
			++i;
		}
		if (resource->actions_num != i)
			resource->actions_num = i;
		field++;
	}
	if (!resource->actions_num)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "invalid modification flow item");
	return 0;
}
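
/*
 * Worked example (illustrative only): setting a UDP destination port walks
 * modify_udp[]. Only the dst_port mask segment is non-zero, so a single
 * command is emitted with field = MLX5_MODI_OUT_UDP_DPORT and
 * length = 16 bits (a full 4-byte field uses the special length 0), and
 * the 2-byte value is right-aligned into data bytes [2..3] before the
 * big-endian conversion of data0.
 */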

/**
 * Convert modify-header set IPv4 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv4
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv4 *conf =
		(const struct rte_flow_action_set_ipv4 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV4 };
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;

	memset(&ipv4, 0, sizeof(ipv4));
	memset(&ipv4_mask, 0, sizeof(ipv4_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC) {
		ipv4.hdr.src_addr = conf->ipv4_addr;
		ipv4_mask.hdr.src_addr = rte_flow_item_ipv4_mask.hdr.src_addr;
	} else {
		ipv4.hdr.dst_addr = conf->ipv4_addr;
		ipv4_mask.hdr.dst_addr = rte_flow_item_ipv4_mask.hdr.dst_addr;
	}
	item.spec = &ipv4;
	item.mask = &ipv4_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv4, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
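
/*
 * The set-field converters here all follow the same template: wrap the
 * action value into a synthetic rte_flow_item whose mask enables just the
 * targeted field, then reuse flow_dv_convert_modify_action() to emit the
 * hardware commands. Only the item type and the field table differ.
 */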

/**
 * Convert modify-header set IPv6 address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ipv6
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ipv6 *conf =
		(const struct rte_flow_action_set_ipv6 *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_IPV6 };
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;

	memset(&ipv6, 0, sizeof(ipv6));
	memset(&ipv6_mask, 0, sizeof(ipv6_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC) {
		memcpy(&ipv6.hdr.src_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.src_addr));
		memcpy(&ipv6_mask.hdr.src_addr,
		       &rte_flow_item_ipv6_mask.hdr.src_addr,
		       sizeof(ipv6.hdr.src_addr));
	} else {
		memcpy(&ipv6.hdr.dst_addr, &conf->ipv6_addr,
		       sizeof(ipv6.hdr.dst_addr));
		memcpy(&ipv6_mask.hdr.dst_addr,
		       &rte_flow_item_ipv6_mask.hdr.dst_addr,
		       sizeof(ipv6.hdr.dst_addr));
	}
	item.spec = &ipv6;
	item.mask = &ipv6_mask;
	return flow_dv_convert_modify_action(&item, modify_ipv6, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set MAC address action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_mac
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_mac *conf =
		(const struct rte_flow_action_set_mac *)(action->conf);
	struct rte_flow_item item = { .type = RTE_FLOW_ITEM_TYPE_ETH };
	struct rte_flow_item_eth eth;
	struct rte_flow_item_eth eth_mask;

	memset(&eth, 0, sizeof(eth));
	memset(&eth_mask, 0, sizeof(eth_mask));
	if (action->type == RTE_FLOW_ACTION_TYPE_SET_MAC_SRC) {
		memcpy(&eth.src.addr_bytes, &conf->mac_addr,
		       sizeof(eth.src.addr_bytes));
		memcpy(&eth_mask.src.addr_bytes,
		       &rte_flow_item_eth_mask.src.addr_bytes,
		       sizeof(eth_mask.src.addr_bytes));
	} else {
		memcpy(&eth.dst.addr_bytes, &conf->mac_addr,
		       sizeof(eth.dst.addr_bytes));
		memcpy(&eth_mask.dst.addr_bytes,
		       &rte_flow_item_eth_mask.dst.addr_bytes,
		       sizeof(eth_mask.dst.addr_bytes));
	}
	item.spec = &eth;
	item.mask = &eth_mask;
	return flow_dv_convert_modify_action(&item, modify_eth, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header set TP action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_tp
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_tp *conf =
		(const struct rte_flow_action_set_tp *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_udp udp_mask;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_tcp tcp_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->udp) {
		memset(&udp, 0, sizeof(udp));
		memset(&udp_mask, 0, sizeof(udp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			udp.hdr.src_port = conf->port;
			udp_mask.hdr.src_port =
					rte_flow_item_udp_mask.hdr.src_port;
		} else {
			udp.hdr.dst_port = conf->port;
			udp_mask.hdr.dst_port =
					rte_flow_item_udp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_UDP;
		item.spec = &udp;
		item.mask = &udp_mask;
		field = modify_udp;
	}
	if (attr->tcp) {
		memset(&tcp, 0, sizeof(tcp));
		memset(&tcp_mask, 0, sizeof(tcp_mask));
		if (action->type == RTE_FLOW_ACTION_TYPE_SET_TP_SRC) {
			tcp.hdr.src_port = conf->port;
			tcp_mask.hdr.src_port =
					rte_flow_item_tcp_mask.hdr.src_port;
		} else {
			tcp.hdr.dst_port = conf->port;
			tcp_mask.hdr.dst_port =
					rte_flow_item_tcp_mask.hdr.dst_port;
		}
		item.type = RTE_FLOW_ITEM_TYPE_TCP;
		item.spec = &tcp;
		item.mask = &tcp_mask;
		field = modify_tcp;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}
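
/*
 * Note: the attributes derived from the pattern decide whether the UDP or
 * the TCP table is used; exactly one of attr->udp/attr->tcp is expected to
 * be set for a valid set-TP flow, otherwise "item" and "field" would reach
 * the conversion call uninitialized.
 */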

/**
 * Convert modify-header set TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] action
 *   Pointer to action specification.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_action *action,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	const struct rte_flow_action_set_ttl *conf =
		(const struct rte_flow_action_set_ttl *)(action->conf);
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = conf->ttl_value;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = conf->ttl_value;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_SET, error);
}

/**
 * Convert modify-header decrement TTL action to DV specification.
 *
 * @param[in,out] resource
 *   Pointer to the modify-header resource.
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[in] attr
 *   Pointer to flow attributes structure.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_action_modify_dec_ttl
			(struct mlx5_flow_dv_modify_hdr_resource *resource,
			 const struct rte_flow_item *items,
			 union flow_dv_attr *attr,
			 struct rte_flow_error *error)
{
	struct rte_flow_item item;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv4 ipv4_mask;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_ipv6 ipv6_mask;
	struct field_modify_info *field;

	if (!attr->valid)
		flow_dv_attr_init(items, attr);
	if (attr->ipv4) {
		memset(&ipv4, 0, sizeof(ipv4));
		memset(&ipv4_mask, 0, sizeof(ipv4_mask));
		ipv4.hdr.time_to_live = 0xFF;
		ipv4_mask.hdr.time_to_live = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV4;
		item.spec = &ipv4;
		item.mask = &ipv4_mask;
		field = modify_ipv4;
	}
	if (attr->ipv6) {
		memset(&ipv6, 0, sizeof(ipv6));
		memset(&ipv6_mask, 0, sizeof(ipv6_mask));
		ipv6.hdr.hop_limits = 0xFF;
		ipv6_mask.hdr.hop_limits = 0xFF;
		item.type = RTE_FLOW_ITEM_TYPE_IPV6;
		item.spec = &ipv6;
		item.mask = &ipv6_mask;
		field = modify_ipv6;
	}
	return flow_dv_convert_modify_action(&item, field, resource,
					     MLX5_MODIFICATION_TYPE_ADD, error);
}
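
/*
 * Design note: there is no hardware "decrement" command here; decrement
 * TTL is encoded as MLX5_MODIFICATION_TYPE_ADD of 0xFF, which on an 8-bit
 * field wraps around to subtracting one (x + 255 == x - 1 mod 256).
 */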

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
			   const struct rte_flow_item *item,
			   const struct rte_flow_attr *attr,
			   struct rte_flow_error *error)
{
	const struct rte_flow_item_meta *spec = item->spec;
	const struct rte_flow_item_meta *mask = item->mask;
	const struct rte_flow_item_meta nic_mask = {
		.data = RTE_BE32(UINT32_MAX)
	};
	int ret;
	uint64_t offloads = dev->data->dev_conf.txmode.offloads;

	if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
		return rte_flow_error_set(error, EPERM,
					  RTE_FLOW_ERROR_TYPE_ITEM,
					  NULL,
					  "match on metadata offload "
					  "configuration is off for this port");
	if (!spec)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  item->spec,
					  "data cannot be empty");
	if (!spec->data)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
					  NULL,
					  "data cannot be zero");
	if (!mask)
		mask = &rte_flow_item_meta_mask;
	ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
					(const uint8_t *)&nic_mask,
					sizeof(struct rte_flow_item_meta),
					error);
	if (ret < 0)
		return ret;
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "pattern not supported for ingress");
	return 0;
}

/**
 * Validate count action.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_count(struct rte_eth_dev *dev,
			      struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;

	if (!priv->config.devx)
		goto notsup_err;
#ifdef HAVE_IBV_FLOW_DEVX_COUNTERS
	return 0;
#endif
notsup_err:
	return rte_flow_error_set
		      (error, ENOTSUP,
		       RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
		       NULL,
		       "count action not supported");
}

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
				 const struct rte_flow_action *action,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (attr->ingress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
				 const struct rte_flow_attr *attr,
				 struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap or"
					  " decap action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  NULL,
					  "decap action not supported for "
					  "egress");
	return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (!(action->conf))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, action,
					  "configuration cannot be null");
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and encap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single encap"
					  " action in a flow");
	/* encap without preceding decap is not supported for ingress */
	if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
					  NULL,
					  "encap action not supported for "
					  "ingress");
	return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
				  const struct rte_flow_action *action,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error)
{
	if (action_flags & MLX5_FLOW_ACTION_DROP)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't drop and decap in same flow");
	if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have encap action before"
					  " decap action");
	if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can only have a single decap"
					  " action in a flow");
	if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION, NULL,
					  "can't have decap action after"
					  " modify action");
	/* decap action is valid on egress only if it is followed by encap */
	if (attr->egress) {
		for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
		       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
		       action++) {
		}
		if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
			return rte_flow_error_set
					(error, ENOTSUP,
					 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					 NULL, "decap action not supported"
					 " for egress");
	}
	return 0;
}

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_encap_decap_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_encap_decap_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_encap_decap_resource *cache_resource;
	struct rte_flow *flow = dev_flow->flow;
	struct mlx5dv_dr_ns *ns;

	resource->flags = flow->group ? 0 : 1;
	if (flow->ingress)
		ns = sh->rx_ns;
	else
		ns = sh->tx_ns;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->encaps_decaps, next) {
		if (resource->reformat_type == cache_resource->reformat_type &&
		    resource->ft_type == cache_resource->ft_type &&
		    resource->flags == cache_resource->flags &&
		    resource->size == cache_resource->size &&
		    !memcmp((const void *)resource->buf,
			    (const void *)cache_resource->buf,
			    resource->size)) {
			DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.encap_decap = cache_resource;
			return 0;
		}
	}
	/* Register new encap/decap resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->verbs_action =
		mlx5_glue->dv_create_flow_action_packet_reformat
			(sh->ctx, cache_resource->reformat_type,
			 cache_resource->ft_type, ns, cache_resource->flags,
			 cache_resource->size,
			 (cache_resource->size ? cache_resource->buf : NULL));
	if (!cache_resource->verbs_action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->encaps_decaps, cache_resource, next);
	dev_flow->dv.encap_decap = cache_resource;
	DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}
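
/*
 * Caching sketch: encap/decap actions are interned in the shared context.
 * The lookup key is (reformat_type, ft_type, flags, size, buf); a cache
 * hit only bumps the refcount, a miss creates the verbs action and inserts
 * it at the list head, so identical flows share one hardware object.
 */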

/**
 * Find existing table jump resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to jump table resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success otherwise -errno and errno is set.
 */
static int
flow_dv_jump_tbl_resource_register
			(struct rte_eth_dev *dev,
			 struct mlx5_flow_dv_jump_tbl_resource *resource,
			 struct mlx5_flow *dev_flow,
			 struct rte_flow_error *error)
{
	struct mlx5_priv *priv = dev->data->dev_private;
	struct mlx5_ibv_shared *sh = priv->sh;
	struct mlx5_flow_dv_jump_tbl_resource *cache_resource;

	/* Lookup a matching resource from cache. */
	LIST_FOREACH(cache_resource, &sh->jump_tbl, next) {
		if (resource->tbl == cache_resource->tbl) {
			DRV_LOG(DEBUG, "jump table resource %p: refcnt %d++",
				(void *)cache_resource,
				rte_atomic32_read(&cache_resource->refcnt));
			rte_atomic32_inc(&cache_resource->refcnt);
			dev_flow->dv.jump = cache_resource;
			return 0;
		}
	}
	/* Register new jump table resource. */
	cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
	if (!cache_resource)
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "cannot allocate resource memory");
	*cache_resource = *resource;
	cache_resource->action =
		mlx5_glue->dr_create_flow_action_dest_flow_tbl
		(resource->tbl->obj);
	if (!cache_resource->action) {
		rte_free(cache_resource);
		return rte_flow_error_set(error, ENOMEM,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, "cannot create action");
	}
	rte_atomic32_init(&cache_resource->refcnt);
	rte_atomic32_inc(&cache_resource->refcnt);
	LIST_INSERT_HEAD(&sh->jump_tbl, cache_resource, next);
	dev_flow->dv.jump = cache_resource;
	DRV_LOG(DEBUG, "new jump table resource %p: refcnt %d++",
		(void *)cache_resource,
		rte_atomic32_read(&cache_resource->refcnt));
	return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the item type's structure, 0 if void or unsupported.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
	size_t retval;

	switch (item_type) {
	case RTE_FLOW_ITEM_TYPE_ETH:
		retval = sizeof(struct rte_flow_item_eth);
		break;
	case RTE_FLOW_ITEM_TYPE_VLAN:
		retval = sizeof(struct rte_flow_item_vlan);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV4:
		retval = sizeof(struct rte_flow_item_ipv4);
		break;
	case RTE_FLOW_ITEM_TYPE_IPV6:
		retval = sizeof(struct rte_flow_item_ipv6);
		break;
	case RTE_FLOW_ITEM_TYPE_UDP:
		retval = sizeof(struct rte_flow_item_udp);
		break;
	case RTE_FLOW_ITEM_TYPE_TCP:
		retval = sizeof(struct rte_flow_item_tcp);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN:
		retval = sizeof(struct rte_flow_item_vxlan);
		break;
	case RTE_FLOW_ITEM_TYPE_GRE:
		retval = sizeof(struct rte_flow_item_gre);
		break;
	case RTE_FLOW_ITEM_TYPE_NVGRE:
		retval = sizeof(struct rte_flow_item_nvgre);
		break;
	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		retval = sizeof(struct rte_flow_item_vxlan_gpe);
		break;
	case RTE_FLOW_ITEM_TYPE_MPLS:
		retval = sizeof(struct rte_flow_item_mpls);
		break;
	case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
	default:
		retval = 0;
		break;
	}
	return retval;
}

#define MLX5_ENCAP_IPV4_VERSION		0x40
#define MLX5_ENCAP_IPV4_IHL_MIN		0x05
#define MLX5_ENCAP_IPV4_TTL_DEF		0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW	0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT	0xff
#define MLX5_ENCAP_VXLAN_FLAGS		0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS	0x04

/**
 * Convert the encap action data from list of rte_flow_item to raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
			   size_t *size, struct rte_flow_error *error)
{
	struct ether_hdr *eth = NULL;
	struct vlan_hdr *vlan = NULL;
	struct ipv4_hdr *ipv4 = NULL;
	struct ipv6_hdr *ipv6 = NULL;
	struct udp_hdr *udp = NULL;
	struct vxlan_hdr *vxlan = NULL;
	struct vxlan_gpe_hdr *vxlan_gpe = NULL;
	struct gre_hdr *gre = NULL;
	size_t len;
	size_t temp_size = 0;

	if (!items)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ACTION,
					  NULL, "invalid empty data");
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		len = flow_dv_get_item_len(items->type);
		if (len + temp_size > MLX5_ENCAP_MAX_LEN)
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "items total size is too big"
						  " for encap action");
		rte_memcpy((void *)&buf[temp_size], items->spec, len);
		switch (items->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth = (struct ether_hdr *)&buf[temp_size];
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan = (struct vlan_hdr *)&buf[temp_size];
			if (!eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"eth header not found");
			if (!eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			ipv4 = (struct ipv4_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
			if (!ipv4->version_ihl)
				ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
						    MLX5_ENCAP_IPV4_IHL_MIN;
			if (!ipv4->time_to_live)
				ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6 = (struct ipv6_hdr *)&buf[temp_size];
			if (!vlan && !eth)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"neither eth nor vlan"
						" header found");
			if (vlan && !vlan->eth_proto)
				vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
			else if (eth && !eth->ether_type)
				eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
			if (!ipv6->vtc_flow)
				ipv6->vtc_flow =
					RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
			if (!ipv6->hop_limits)
				ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp = (struct udp_hdr *)&buf[temp_size];
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_UDP;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_UDP;
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan = (struct vxlan_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!udp->dst_port)
				udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
			if (!vxlan->vx_flags)
				vxlan->vx_flags =
					RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
			vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
			if (!udp)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"udp header not found");
			if (!vxlan_gpe->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!udp->dst_port)
				udp->dst_port =
					RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
			if (!vxlan_gpe->vx_flags)
				vxlan_gpe->vx_flags =
						MLX5_ENCAP_VXLAN_GPE_FLAGS;
			break;
		case RTE_FLOW_ITEM_TYPE_GRE:
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			gre = (struct gre_hdr *)&buf[temp_size];
			if (!gre->proto)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"next protocol not found");
			if (!ipv4 && !ipv6)
				return rte_flow_error_set(error, EINVAL,
						RTE_FLOW_ERROR_TYPE_ACTION,
						(void *)items->type,
						"ip header not found");
			if (ipv4 && !ipv4->next_proto_id)
				ipv4->next_proto_id = IPPROTO_GRE;
			else if (ipv6 && !ipv6->proto)
				ipv6->proto = IPPROTO_GRE;
			break;
		case RTE_FLOW_ITEM_TYPE_VOID:
			break;
		default:
			return rte_flow_error_set(error, EINVAL,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  (void *)items->type,
						  "unsupported item type");
			break;
		}
		temp_size += len;
	}
	*size = temp_size;
	return 0;
}
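
/*
 * Example (illustrative only): a VXLAN encapsulation is described by the
 * item list eth / ipv4 / udp / vxlan / end. The converter copies each
 * item's spec into the raw buffer and backfills the obvious defaults
 * unless the user already set them: eth.ether_type, ipv4.next_proto_id,
 * the VXLAN UDP destination port 4789 (MLX5_UDP_PORT_VXLAN) and the
 * VXLAN flags.
 */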
1204
1205 /**
1206  * Convert L2 encap action to DV specification.
1207  *
1208  * @param[in] dev
1209  *   Pointer to rte_eth_dev structure.
1210  * @param[in] action
1211  *   Pointer to action structure.
1212  * @param[in, out] dev_flow
1213  *   Pointer to the mlx5_flow.
1214  * @param[out] error
1215  *   Pointer to the error structure.
1216  *
1217  * @return
1218  *   0 on success, a negative errno value otherwise and rte_errno is set.
1219  */
1220 static int
1221 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
1222                                const struct rte_flow_action *action,
1223                                struct mlx5_flow *dev_flow,
1224                                struct rte_flow_error *error)
1225 {
1226         const struct rte_flow_item *encap_data;
1227         const struct rte_flow_action_raw_encap *raw_encap_data;
1228         struct mlx5_flow_dv_encap_decap_resource res = {
1229                 .reformat_type =
1230                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
1231                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
1232         };
1233
1234         if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
1235                 raw_encap_data =
1236                         (const struct rte_flow_action_raw_encap *)action->conf;
1237                 res.size = raw_encap_data->size;
1238                 memcpy(res.buf, raw_encap_data->data, res.size);
1239         } else {
1240                 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
1241                         encap_data =
1242                                 ((const struct rte_flow_action_vxlan_encap *)
1243                                                 action->conf)->definition;
1244                 else
1245                         encap_data =
1246                                 ((const struct rte_flow_action_nvgre_encap *)
1247                                                 action->conf)->definition;
1248                 if (flow_dv_convert_encap_data(encap_data, res.buf,
1249                                                &res.size, error))
1250                         return -rte_errno;
1251         }
1252         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1253                 return rte_flow_error_set(error, EINVAL,
1254                                           RTE_FLOW_ERROR_TYPE_ACTION,
1255                                           NULL, "can't create L2 encap action");
1256         return 0;
1257 }
1258
1259 /**
1260  * Convert L2 decap action to DV specification.
1261  *
1262  * @param[in] dev
1263  *   Pointer to rte_eth_dev structure.
1264  * @param[in, out] dev_flow
1265  *   Pointer to the mlx5_flow.
1266  * @param[out] error
1267  *   Pointer to the error structure.
1268  *
1269  * @return
1270  *   0 on success, a negative errno value otherwise and rte_errno is set.
1271  */
1272 static int
1273 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
1274                                struct mlx5_flow *dev_flow,
1275                                struct rte_flow_error *error)
1276 {
1277         struct mlx5_flow_dv_encap_decap_resource res = {
1278                 .size = 0,
1279                 .reformat_type =
1280                         MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
1281                 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
1282         };
1283
1284         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1285                 return rte_flow_error_set(error, EINVAL,
1286                                           RTE_FLOW_ERROR_TYPE_ACTION,
1287                                           NULL, "can't create L2 decap action");
1288         return 0;
1289 }
1290
1291 /**
1292  * Convert raw decap/encap (L3 tunnel) action to DV specification.
1293  *
1294  * @param[in] dev
1295  *   Pointer to rte_eth_dev structure.
1296  * @param[in] action
1297  *   Pointer to action structure.
1298  * @param[in, out] dev_flow
1299  *   Pointer to the mlx5_flow.
1300  * @param[in] attr
1301  *   Pointer to the flow attributes.
1302  * @param[out] error
1303  *   Pointer to the error structure.
1304  *
1305  * @return
1306  *   0 on success, a negative errno value otherwise and rte_errno is set.
1307  */
1308 static int
1309 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
1310                                 const struct rte_flow_action *action,
1311                                 struct mlx5_flow *dev_flow,
1312                                 const struct rte_flow_attr *attr,
1313                                 struct rte_flow_error *error)
1314 {
1315         const struct rte_flow_action_raw_encap *encap_data;
1316         struct mlx5_flow_dv_encap_decap_resource res;
1317
1318         encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
1319         res.size = encap_data->size;
1320         memcpy(res.buf, encap_data->data, res.size);
1321         res.reformat_type = attr->egress ?
1322                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
1323                 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
1324         res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
1325                                      MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
1326         if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
1327                 return rte_flow_error_set(error, EINVAL,
1328                                           RTE_FLOW_ERROR_TYPE_ACTION,
1329                                           NULL, "can't create encap action");
1330         return 0;
1331 }
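
/*
 * Illustrative sketch, not part of the driver: unlike the L2 variants,
 * RAW_ENCAP hands the driver a pre-built header blob, and the
 * egress/ingress attribute above decides whether it is treated as an
 * L3 encap or decap reformat. Hypothetical conf setup; assembling the
 * buffer contents is the caller's responsibility.
 */
static void __rte_unused
flow_dv_example_raw_encap_conf(struct rte_flow_action *action,
                               uint8_t *buf, size_t size)
{
        static struct rte_flow_action_raw_encap conf;

        conf.data = buf; /* E.g. Ethernet + IPv4 + UDP + tunnel header. */
        conf.size = size; /* Must fit the device reformat buffer. */
        action->type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
        action->conf = &conf;
}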
1332
1333 /**
1334  * Validate the modify-header actions.
1335  *
1336  * @param[in] action_flags
1337  *   Holds the actions detected until now.
1338  * @param[in] action
1339  *   Pointer to the modify action.
1340  * @param[out] error
1341  *   Pointer to error structure.
1342  *
1343  * @return
1344  *   0 on success, a negative errno value otherwise and rte_errno is set.
1345  */
1346 static int
1347 flow_dv_validate_action_modify_hdr(const uint64_t action_flags,
1348                                    const struct rte_flow_action *action,
1349                                    struct rte_flow_error *error)
1350 {
1351         if (action->type != RTE_FLOW_ACTION_TYPE_DEC_TTL && !action->conf)
1352                 return rte_flow_error_set(error, EINVAL,
1353                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1354                                           NULL, "action configuration not set");
1355         if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
1356                 return rte_flow_error_set(error, EINVAL,
1357                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1358                                           "can't have encap action before"
1359                                           " modify action");
1360         return 0;
1361 }
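
/*
 * Illustrative sketch, not part of the driver: the encap check above
 * means header rewrites must be applied to the bare packet, before any
 * encapsulation. A hypothetical conforming ordering:
 */
static const enum rte_flow_action_type
flow_dv_example_modify_then_encap[] __rte_unused = {
        RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC, /* Rewrite the bare header. */
        RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP, /* Then wrap the packet. */
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END, /* The reverse order fails with EINVAL. */
};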
1362
1363 /**
1364  * Validate the modify-header MAC address actions.
1365  *
1366  * @param[in] action_flags
1367  *   Holds the actions detected until now.
1368  * @param[in] action
1369  *   Pointer to the modify action.
1370  * @param[in] item_flags
1371  *   Holds the items detected.
1372  * @param[out] error
1373  *   Pointer to error structure.
1374  *
1375  * @return
1376  *   0 on success, a negative errno value otherwise and rte_errno is set.
1377  */
1378 static int
1379 flow_dv_validate_action_modify_mac(const uint64_t action_flags,
1380                                    const struct rte_flow_action *action,
1381                                    const uint64_t item_flags,
1382                                    struct rte_flow_error *error)
1383 {
1384         int ret = 0;
1385
1386         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1387         if (!ret) {
1388                 if (!(item_flags & MLX5_FLOW_LAYER_L2))
1389                         return rte_flow_error_set(error, EINVAL,
1390                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1391                                                   NULL,
1392                                                   "no L2 item in pattern");
1393         }
1394         return ret;
1395 }
1396
1397 /**
1398  * Validate the modify-header IPv4 address actions.
1399  *
1400  * @param[in] action_flags
1401  *   Holds the actions detected until now.
1402  * @param[in] action
1403  *   Pointer to the modify action.
1404  * @param[in] item_flags
1405  *   Holds the items detected.
1406  * @param[out] error
1407  *   Pointer to error structure.
1408  *
1409  * @return
1410  *   0 on success, a negative errno value otherwise and rte_errno is set.
1411  */
1412 static int
1413 flow_dv_validate_action_modify_ipv4(const uint64_t action_flags,
1414                                     const struct rte_flow_action *action,
1415                                     const uint64_t item_flags,
1416                                     struct rte_flow_error *error)
1417 {
1418         int ret = 0;
1419
1420         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1421         if (!ret) {
1422                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV4))
1423                         return rte_flow_error_set(error, EINVAL,
1424                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1425                                                   NULL,
1426                                                   "no ipv4 item in pattern");
1427         }
1428         return ret;
1429 }
1430
1431 /**
1432  * Validate the modify-header IPv6 address actions.
1433  *
1434  * @param[in] action_flags
1435  *   Holds the actions detected until now.
1436  * @param[in] action
1437  *   Pointer to the modify action.
1438  * @param[in] item_flags
1439  *   Holds the items detected.
1440  * @param[out] error
1441  *   Pointer to error structure.
1442  *
1443  * @return
1444  *   0 on success, a negative errno value otherwise and rte_errno is set.
1445  */
1446 static int
1447 flow_dv_validate_action_modify_ipv6(const uint64_t action_flags,
1448                                     const struct rte_flow_action *action,
1449                                     const uint64_t item_flags,
1450                                     struct rte_flow_error *error)
1451 {
1452         int ret = 0;
1453
1454         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1455         if (!ret) {
1456                 if (!(item_flags & MLX5_FLOW_LAYER_L3_IPV6))
1457                         return rte_flow_error_set(error, EINVAL,
1458                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1459                                                   NULL,
1460                                                   "no ipv6 item in pattern");
1461         }
1462         return ret;
1463 }
1464
1465 /**
1466  * Validate the modify-header TP actions.
1467  *
1468  * @param[in] action_flags
1469  *   Holds the actions detected until now.
1470  * @param[in] action
1471  *   Pointer to the modify action.
1472  * @param[in] item_flags
1473  *   Holds the items detected.
1474  * @param[out] error
1475  *   Pointer to error structure.
1476  *
1477  * @return
1478  *   0 on success, a negative errno value otherwise and rte_errno is set.
1479  */
1480 static int
1481 flow_dv_validate_action_modify_tp(const uint64_t action_flags,
1482                                   const struct rte_flow_action *action,
1483                                   const uint64_t item_flags,
1484                                   struct rte_flow_error *error)
1485 {
1486         int ret = 0;
1487
1488         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1489         if (!ret) {
1490                 if (!(item_flags & MLX5_FLOW_LAYER_L4))
1491                         return rte_flow_error_set(error, EINVAL,
1492                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1493                                                   NULL, "no transport layer "
1494                                                   "in pattern");
1495         }
1496         return ret;
1497 }
1498
1499 /**
1500  * Validate the modify-header TTL actions.
1501  *
1502  * @param[in] action_flags
1503  *   Holds the actions detected until now.
1504  * @param[in] action
1505  *   Pointer to the modify action.
1506  * @param[in] item_flags
1507  *   Holds the items detected.
1508  * @param[out] error
1509  *   Pointer to error structure.
1510  *
1511  * @return
1512  *   0 on success, a negative errno value otherwise and rte_errno is set.
1513  */
1514 static int
1515 flow_dv_validate_action_modify_ttl(const uint64_t action_flags,
1516                                    const struct rte_flow_action *action,
1517                                    const uint64_t item_flags,
1518                                    struct rte_flow_error *error)
1519 {
1520         int ret = 0;
1521
1522         ret = flow_dv_validate_action_modify_hdr(action_flags, action, error);
1523         if (!ret) {
1524                 if (!(item_flags & MLX5_FLOW_LAYER_L3))
1525                         return rte_flow_error_set(error, EINVAL,
1526                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1527                                                   NULL,
1528                                                   "no IP protocol in pattern");
1529         }
1530         return ret;
1531 }
1532
1533 /**
1534  * Validate jump action.
1535  *
1536  * @param[in] action
1537  *   Pointer to the modify action.
1538  * @param[in] group
1539  *   The group of the current flow.
1540  * @param[out] error
1541  *   Pointer to error structure.
1542  *
1543  * @return
1544  *   0 on success, a negative errno value otherwise and rte_errno is set.
1545  */
1546 static int
1547 flow_dv_validate_action_jump(const struct rte_flow_action *action,
1548                              uint32_t group,
1549                              struct rte_flow_error *error)
1550 {
1551         if (!action->conf)
1552                 return rte_flow_error_set(error, EINVAL,
1553                                           RTE_FLOW_ERROR_TYPE_ACTION_CONF,
1554                                           NULL, "action configuration not set");
1555         if (group >= ((const struct rte_flow_action_jump *)action->conf)->group)
1556                 return rte_flow_error_set(error, EINVAL,
1557                                           RTE_FLOW_ERROR_TYPE_ACTION, NULL,
1558                                           "target group must be higher than"
1559                                           " the current flow group");
1560         return 0;
1561 }
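
/*
 * Illustrative sketch, not part of the driver: jump targets must point
 * to a strictly higher group, which keeps the flow table graph
 * loop-free. Hypothetical conf accepted for a flow created in group 0:
 */
static const struct rte_flow_action_jump flow_dv_example_jump __rte_unused = {
        .group = 1, /* Valid only while the flow's own group is 0. */
};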
1562
1563
1564 /**
1565  * Find existing modify-header resource or create and register a new one.
1566  *
1567  * @param[in, out] dev
1568  *   Pointer to rte_eth_dev structure.
1569  * @param[in, out] resource
1570  *   Pointer to modify-header resource.
1571  * @param[in, out] dev_flow
1572  *   Pointer to the dev_flow.
1573  * @param[out] error
1574  *   Pointer to error structure.
1575  *
1576  * @return
1577  *   0 on success, a negative errno value otherwise and rte_errno is set.
1578  */
1579 static int
1580 flow_dv_modify_hdr_resource_register
1581                         (struct rte_eth_dev *dev,
1582                          struct mlx5_flow_dv_modify_hdr_resource *resource,
1583                          struct mlx5_flow *dev_flow,
1584                          struct rte_flow_error *error)
1585 {
1586         struct mlx5_priv *priv = dev->data->dev_private;
1587         struct mlx5_ibv_shared *sh = priv->sh;
1588         struct mlx5_flow_dv_modify_hdr_resource *cache_resource;
1589
1590         struct mlx5dv_dr_ns *ns =
1591                 resource->ft_type == MLX5DV_FLOW_TABLE_TYPE_NIC_TX ?
1592                 sh->tx_ns : sh->rx_ns;
1593
1594         /* Lookup a matching resource from cache. */
1595         LIST_FOREACH(cache_resource, &sh->modify_cmds, next) {
1596                 if (resource->ft_type == cache_resource->ft_type &&
1597                     resource->actions_num == cache_resource->actions_num &&
1598                     !memcmp((const void *)resource->actions,
1599                             (const void *)cache_resource->actions,
1600                             (resource->actions_num *
1601                                             sizeof(resource->actions[0])))) {
1602                         DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d++",
1603                                 (void *)cache_resource,
1604                                 rte_atomic32_read(&cache_resource->refcnt));
1605                         rte_atomic32_inc(&cache_resource->refcnt);
1606                         dev_flow->dv.modify_hdr = cache_resource;
1607                         return 0;
1608                 }
1609         }
1610         /* Register new modify-header resource. */
1611         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
1612         if (!cache_resource)
1613                 return rte_flow_error_set(error, ENOMEM,
1614                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1615                                           "cannot allocate resource memory");
1616         *cache_resource = *resource;
1617         cache_resource->verbs_action =
1618                 mlx5_glue->dv_create_flow_action_modify_header
1619                                         (sh->ctx, cache_resource->ft_type,
1620                                          ns, 0,
1621                                          cache_resource->actions_num *
1622                                          sizeof(cache_resource->actions[0]),
1623                                          (uint64_t *)cache_resource->actions);
1624         if (!cache_resource->verbs_action) {
1625                 rte_free(cache_resource);
1626                 return rte_flow_error_set(error, ENOMEM,
1627                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1628                                           NULL, "cannot create action");
1629         }
1630         rte_atomic32_init(&cache_resource->refcnt);
1631         rte_atomic32_inc(&cache_resource->refcnt);
1632         LIST_INSERT_HEAD(&sh->modify_cmds, cache_resource, next);
1633         dev_flow->dv.modify_hdr = cache_resource;
1634         DRV_LOG(DEBUG, "new modify-header resource %p: refcnt %d++",
1635                 (void *)cache_resource,
1636                 rte_atomic32_read(&cache_resource->refcnt));
1637         return 0;
1638 }
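
/*
 * Illustrative sketch, not part of the driver: the cache lookup above
 * treats two modify-header resources as identical exactly when this
 * hypothetical predicate holds - same table type, same number of
 * commands and bit-identical command arrays.
 */
static int __rte_unused
flow_dv_example_modify_hdr_equal
                        (const struct mlx5_flow_dv_modify_hdr_resource *a,
                         const struct mlx5_flow_dv_modify_hdr_resource *b)
{
        return a->ft_type == b->ft_type &&
               a->actions_num == b->actions_num &&
               !memcmp(a->actions, b->actions,
                       a->actions_num * sizeof(a->actions[0]));
}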
1639
1640 /**
1641  * Get or create a flow counter.
1642  *
1643  * @param[in] dev
1644  *   Pointer to the Ethernet device structure.
1645  * @param[in] shared
1646  *   Indicate if this counter is shared with other flows.
1647  * @param[in] id
1648  *   Counter identifier.
1649  *
1650  * @return
1651  *   Pointer to flow counter on success, NULL otherwise and rte_errno is set.
1652  */
1653 static struct mlx5_flow_counter *
1654 flow_dv_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
1655 {
1656         struct mlx5_priv *priv = dev->data->dev_private;
1657         struct mlx5_flow_counter *cnt = NULL;
1658         struct mlx5_devx_counter_set *dcs = NULL;
1659         int ret;
1660
1661         if (!priv->config.devx) {
1662                 ret = -ENOTSUP;
1663                 goto error_exit;
1664         }
1665         if (shared) {
1666                 LIST_FOREACH(cnt, &priv->flow_counters, next) {
1667                         if (cnt->shared && cnt->id == id) {
1668                                 cnt->ref_cnt++;
1669                                 return cnt;
1670                         }
1671                 }
1672         }
1673         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
1674         dcs = rte_calloc(__func__, 1, sizeof(*dcs), 0);
1675         if (!dcs || !cnt) {
1676                 ret = -ENOMEM;
1677                 goto error_exit;
1678         }
1679         ret = mlx5_devx_cmd_flow_counter_alloc(priv->sh->ctx, dcs);
1680         if (ret)
1681                 goto error_exit;
1682         struct mlx5_flow_counter tmpl = {
1683                 .shared = shared,
1684                 .ref_cnt = 1,
1685                 .id = id,
1686                 .dcs = dcs,
1687         };
1688         tmpl.action = mlx5_glue->dv_create_flow_action_counter(dcs->obj, 0);
1689         if (!tmpl.action) {
1690                 ret = -errno; /* error_exit negates into rte_errno. */
1691                 goto error_exit;
1692         }
1693         *cnt = tmpl;
1694         LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
1695         return cnt;
1696 error_exit:
1697         rte_free(cnt);
1698         rte_free(dcs);
1699         rte_errno = -ret;
1700         return NULL;
1701 }
1702
1703 /**
1704  * Release a flow counter.
1705  *
1706  * @param[in] counter
1707  *   Pointer to the counter handler.
1708  */
1709 static void
1710 flow_dv_counter_release(struct mlx5_flow_counter *counter)
1711 {
1712         int ret;
1713
1714         if (!counter)
1715                 return;
1716         if (--counter->ref_cnt == 0) {
1717                 ret = mlx5_devx_cmd_flow_counter_free(counter->dcs->obj);
1718                 if (ret)
1719                         DRV_LOG(ERR, "Failed to free devx counters, %d", ret);
1720                 LIST_REMOVE(counter, next);
1721                 rte_free(counter->dcs);
1722                 rte_free(counter);
1723         }
1724 }
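
/*
 * Illustrative sketch, not part of the driver: requesting the same
 * shared counter id twice returns one object with its reference count
 * bumped, so several flows can aggregate statistics into a single DevX
 * counter. Hypothetical usage; id 42 is a placeholder.
 */
static void __rte_unused
flow_dv_example_shared_counter(struct rte_eth_dev *dev)
{
        struct mlx5_flow_counter *c1 = flow_dv_counter_new(dev, 1, 42);
        struct mlx5_flow_counter *c2 = flow_dv_counter_new(dev, 1, 42);

        /* On success c1 == c2 and c1->ref_cnt == 2. */
        flow_dv_counter_release(c2);
        flow_dv_counter_release(c1);
}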
1725
1726 /**
1727  * Verify the @p attributes will be correctly understood by the NIC and store
1728  * them in the @p flow if everything is correct.
1729  *
1730  * @param[in] dev
1731  *   Pointer to dev struct.
1732  * @param[in] attributes
1733  *   Pointer to flow attributes
1734  * @param[out] error
1735  *   Pointer to error structure.
1736  *
1737  * @return
1738  *   0 on success, a negative errno value otherwise and rte_errno is set.
1739  */
1740 static int
1741 flow_dv_validate_attributes(struct rte_eth_dev *dev,
1742                             const struct rte_flow_attr *attributes,
1743                             struct rte_flow_error *error)
1744 {
1745         struct mlx5_priv *priv = dev->data->dev_private;
1746         uint32_t priority_max = priv->config.flow_prio - 1;
1747
1748 #ifndef HAVE_MLX5DV_DR
1749         if (attributes->group)
1750                 return rte_flow_error_set(error, ENOTSUP,
1751                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1752                                           NULL,
1753                                           "groups are not supported");
1754 #endif
1755         if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
1756             attributes->priority >= priority_max)
1757                 return rte_flow_error_set(error, ENOTSUP,
1758                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1759                                           NULL,
1760                                           "priority out of range");
1761         if (attributes->transfer)
1762                 return rte_flow_error_set(error, ENOTSUP,
1763                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1764                                           NULL,
1765                                           "transfer is not supported");
1766         if (!(attributes->egress ^ attributes->ingress))
1767                 return rte_flow_error_set(error, ENOTSUP,
1768                                           RTE_FLOW_ERROR_TYPE_ATTR, NULL,
1769                                           "must specify exactly one of "
1770                                           "ingress or egress");
1771         return 0;
1772 }
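
/*
 * Illustrative sketch, not part of the driver: attributes that pass the
 * checks above - exactly one direction bit set, no transfer, priority
 * inside the configured range. Hypothetical example:
 */
static const struct rte_flow_attr flow_dv_example_attr __rte_unused = {
        .group = 0,
        .priority = 0,
        .ingress = 1,
        .egress = 0,
};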
1773
1774 /**
1775  * Internal validation function. It validates both actions and items.
1776  *
1777  * @param[in] dev
1778  *   Pointer to the rte_eth_dev structure.
1779  * @param[in] attr
1780  *   Pointer to the flow attributes.
1781  * @param[in] items
1782  *   Pointer to the list of items.
1783  * @param[in] actions
1784  *   Pointer to the list of actions.
1785  * @param[out] error
1786  *   Pointer to the error structure.
1787  *
1788  * @return
1789  *   0 on success, a negative errno value otherwise and rte_errno is set.
1790  */
1791 static int
1792 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1793                  const struct rte_flow_item items[],
1794                  const struct rte_flow_action actions[],
1795                  struct rte_flow_error *error)
1796 {
1797         int ret;
1798         uint64_t action_flags = 0;
1799         uint64_t item_flags = 0;
1800         uint64_t last_item = 0;
1801         uint8_t next_protocol = 0xff;
1802         int actions_n = 0;
1803
1804         if (items == NULL)
1805                 return -1;
1806         ret = flow_dv_validate_attributes(dev, attr, error);
1807         if (ret < 0)
1808                 return ret;
1809         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1810                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1811                 switch (items->type) {
1812                 case RTE_FLOW_ITEM_TYPE_VOID:
1813                         break;
1814                 case RTE_FLOW_ITEM_TYPE_ETH:
1815                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1816                                                           error);
1817                         if (ret < 0)
1818                                 return ret;
1819                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1820                                              MLX5_FLOW_LAYER_OUTER_L2;
1821                         break;
1822                 case RTE_FLOW_ITEM_TYPE_VLAN:
1823                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
1824                                                            error);
1825                         if (ret < 0)
1826                                 return ret;
1827                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
1828                                              MLX5_FLOW_LAYER_OUTER_VLAN;
1829                         break;
1830                 case RTE_FLOW_ITEM_TYPE_IPV4:
1831                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1832                                                            NULL, error);
1833                         if (ret < 0)
1834                                 return ret;
1835                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1836                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1837                         if (items->mask != NULL &&
1838                             ((const struct rte_flow_item_ipv4 *)
1839                              items->mask)->hdr.next_proto_id) {
1840                                 next_protocol =
1841                                         ((const struct rte_flow_item_ipv4 *)
1842                                          (items->spec))->hdr.next_proto_id;
1843                                 next_protocol &=
1844                                         ((const struct rte_flow_item_ipv4 *)
1845                                          (items->mask))->hdr.next_proto_id;
1846                         } else {
1847                                 /* Reset for inner layer. */
1848                                 next_protocol = 0xff;
1849                         }
1850                         break;
1851                 case RTE_FLOW_ITEM_TYPE_IPV6:
1852                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1853                                                            NULL, error);
1854                         if (ret < 0)
1855                                 return ret;
1856                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1857                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1858                         if (items->mask != NULL &&
1859                             ((const struct rte_flow_item_ipv6 *)
1860                              items->mask)->hdr.proto) {
1861                                 next_protocol =
1862                                         ((const struct rte_flow_item_ipv6 *)
1863                                          items->spec)->hdr.proto;
1864                                 next_protocol &=
1865                                         ((const struct rte_flow_item_ipv6 *)
1866                                          items->mask)->hdr.proto;
1867                         } else {
1868                                 /* Reset for inner layer. */
1869                                 next_protocol = 0xff;
1870                         }
1871                         break;
1872                 case RTE_FLOW_ITEM_TYPE_TCP:
1873                         ret = mlx5_flow_validate_item_tcp
1874                                                 (items, item_flags,
1875                                                  next_protocol,
1876                                                  &rte_flow_item_tcp_mask,
1877                                                  error);
1878                         if (ret < 0)
1879                                 return ret;
1880                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1881                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
1882                         break;
1883                 case RTE_FLOW_ITEM_TYPE_UDP:
1884                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1885                                                           next_protocol,
1886                                                           error);
1887                         if (ret < 0)
1888                                 return ret;
1889                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1890                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
1891                         break;
1892                 case RTE_FLOW_ITEM_TYPE_GRE:
1893                 case RTE_FLOW_ITEM_TYPE_NVGRE:
1894                         ret = mlx5_flow_validate_item_gre(items, item_flags,
1895                                                           next_protocol, error);
1896                         if (ret < 0)
1897                                 return ret;
1898                         last_item = MLX5_FLOW_LAYER_GRE;
1899                         break;
1900                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1901                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1902                                                             error);
1903                         if (ret < 0)
1904                                 return ret;
1905                         last_item = MLX5_FLOW_LAYER_VXLAN;
1906                         break;
1907                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1908                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
1909                                                                 item_flags, dev,
1910                                                                 error);
1911                         if (ret < 0)
1912                                 return ret;
1913                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
1914                         break;
1915                 case RTE_FLOW_ITEM_TYPE_MPLS:
1916                         ret = mlx5_flow_validate_item_mpls(dev, items,
1917                                                            item_flags,
1918                                                            last_item, error);
1919                         if (ret < 0)
1920                                 return ret;
1921                         last_item = MLX5_FLOW_LAYER_MPLS;
1922                         break;
1923                 case RTE_FLOW_ITEM_TYPE_META:
1924                         ret = flow_dv_validate_item_meta(dev, items, attr,
1925                                                          error);
1926                         if (ret < 0)
1927                                 return ret;
1928                         last_item = MLX5_FLOW_ITEM_METADATA;
1929                         break;
1930                 default:
1931                         return rte_flow_error_set(error, ENOTSUP,
1932                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1933                                                   NULL, "item not supported");
1934                 }
1935                 item_flags |= last_item;
1936         }
1937         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1938                 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
1939                         return rte_flow_error_set(error, ENOTSUP,
1940                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1941                                                   actions, "too many actions");
1942                 switch (actions->type) {
1943                 case RTE_FLOW_ACTION_TYPE_VOID:
1944                         break;
1945                 case RTE_FLOW_ACTION_TYPE_FLAG:
1946                         ret = mlx5_flow_validate_action_flag(action_flags,
1947                                                              attr, error);
1948                         if (ret < 0)
1949                                 return ret;
1950                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1951                         ++actions_n;
1952                         break;
1953                 case RTE_FLOW_ACTION_TYPE_MARK:
1954                         ret = mlx5_flow_validate_action_mark(actions,
1955                                                              action_flags,
1956                                                              attr, error);
1957                         if (ret < 0)
1958                                 return ret;
1959                         action_flags |= MLX5_FLOW_ACTION_MARK;
1960                         ++actions_n;
1961                         break;
1962                 case RTE_FLOW_ACTION_TYPE_DROP:
1963                         ret = mlx5_flow_validate_action_drop(action_flags,
1964                                                              attr, error);
1965                         if (ret < 0)
1966                                 return ret;
1967                         action_flags |= MLX5_FLOW_ACTION_DROP;
1968                         ++actions_n;
1969                         break;
1970                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1971                         ret = mlx5_flow_validate_action_queue(actions,
1972                                                               action_flags, dev,
1973                                                               attr, error);
1974                         if (ret < 0)
1975                                 return ret;
1976                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1977                         ++actions_n;
1978                         break;
1979                 case RTE_FLOW_ACTION_TYPE_RSS:
1980                         ret = mlx5_flow_validate_action_rss(actions,
1981                                                             action_flags, dev,
1982                                                             attr, item_flags,
1983                                                             error);
1984                         if (ret < 0)
1985                                 return ret;
1986                         action_flags |= MLX5_FLOW_ACTION_RSS;
1987                         ++actions_n;
1988                         break;
1989                 case RTE_FLOW_ACTION_TYPE_COUNT:
1990                         ret = flow_dv_validate_action_count(dev, error);
1991                         if (ret < 0)
1992                                 return ret;
1993                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1994                         ++actions_n;
1995                         break;
1996                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1997                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1998                         ret = flow_dv_validate_action_l2_encap(action_flags,
1999                                                                actions, attr,
2000                                                                error);
2001                         if (ret < 0)
2002                                 return ret;
2003                         action_flags |= actions->type ==
2004                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
2005                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
2006                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
2007                         ++actions_n;
2008                         break;
2009                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
2010                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
2011                         ret = flow_dv_validate_action_l2_decap(action_flags,
2012                                                                attr, error);
2013                         if (ret < 0)
2014                                 return ret;
2015                         action_flags |= actions->type ==
2016                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
2017                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
2018                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
2019                         ++actions_n;
2020                         break;
2021                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
2022                         ret = flow_dv_validate_action_raw_encap(action_flags,
2023                                                                 actions, attr,
2024                                                                 error);
2025                         if (ret < 0)
2026                                 return ret;
2027                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2028                         ++actions_n;
2029                         break;
2030                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2031                         ret = flow_dv_validate_action_raw_decap(action_flags,
2032                                                                 actions, attr,
2033                                                                 error);
2034                         if (ret < 0)
2035                                 return ret;
2036                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2037                         ++actions_n;
2038                         break;
2039                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
2040                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
2041                         ret = flow_dv_validate_action_modify_mac(action_flags,
2042                                                                  actions,
2043                                                                  item_flags,
2044                                                                  error);
2045                         if (ret < 0)
2046                                 return ret;
2047                         /* Count all modify-header actions as one action. */
2048                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2049                                 ++actions_n;
2050                         action_flags |= actions->type ==
2051                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
2052                                                 MLX5_FLOW_ACTION_SET_MAC_SRC :
2053                                                 MLX5_FLOW_ACTION_SET_MAC_DST;
2054                         break;
2055
2056                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
2057                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
2058                         ret = flow_dv_validate_action_modify_ipv4(action_flags,
2059                                                                   actions,
2060                                                                   item_flags,
2061                                                                   error);
2062                         if (ret < 0)
2063                                 return ret;
2064                         /* Count all modify-header actions as one action. */
2065                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2066                                 ++actions_n;
2067                         action_flags |= actions->type ==
2068                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
2069                                                 MLX5_FLOW_ACTION_SET_IPV4_SRC :
2070                                                 MLX5_FLOW_ACTION_SET_IPV4_DST;
2071                         break;
2072                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
2073                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
2074                         ret = flow_dv_validate_action_modify_ipv6(action_flags,
2075                                                                   actions,
2076                                                                   item_flags,
2077                                                                   error);
2078                         if (ret < 0)
2079                                 return ret;
2080                         /* Count all modify-header actions as one action. */
2081                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2082                                 ++actions_n;
2083                         action_flags |= actions->type ==
2084                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
2085                                                 MLX5_FLOW_ACTION_SET_IPV6_SRC :
2086                                                 MLX5_FLOW_ACTION_SET_IPV6_DST;
2087                         break;
2088                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
2089                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
2090                         ret = flow_dv_validate_action_modify_tp(action_flags,
2091                                                                 actions,
2092                                                                 item_flags,
2093                                                                 error);
2094                         if (ret < 0)
2095                                 return ret;
2096                         /* Count all modify-header actions as one action. */
2097                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2098                                 ++actions_n;
2099                         action_flags |= actions->type ==
2100                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
2101                                                 MLX5_FLOW_ACTION_SET_TP_SRC :
2102                                                 MLX5_FLOW_ACTION_SET_TP_DST;
2103                         break;
2104                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
2105                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
2106                         ret = flow_dv_validate_action_modify_ttl(action_flags,
2107                                                                  actions,
2108                                                                  item_flags,
2109                                                                  error);
2110                         if (ret < 0)
2111                                 return ret;
2112                         /* Count all modify-header actions as one action. */
2113                         if (!(action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS))
2114                                 ++actions_n;
2115                         action_flags |= actions->type ==
2116                                         RTE_FLOW_ACTION_TYPE_SET_TTL ?
2117                                                 MLX5_FLOW_ACTION_SET_TTL :
2118                                                 MLX5_FLOW_ACTION_DEC_TTL;
2119                         break;
2120                 case RTE_FLOW_ACTION_TYPE_JUMP:
2121                         ret = flow_dv_validate_action_jump(actions,
2122                                                            attr->group, error);
2123                         if (ret)
2124                                 return ret;
2125                         ++actions_n;
2126                         action_flags |= MLX5_FLOW_ACTION_JUMP;
2127                         break;
2128                 default:
2129                         return rte_flow_error_set(error, ENOTSUP,
2130                                                   RTE_FLOW_ERROR_TYPE_ACTION,
2131                                                   actions,
2132                                                   "action not supported");
2133                 }
2134         }
2135         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
2136                 return rte_flow_error_set(error, EINVAL,
2137                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
2138                                           "no fate action is found");
2139         return 0;
2140 }
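
/*
 * Illustrative sketch, not part of the driver: a minimal rule that
 * flow_dv_validate() accepts - an ingress ETH/IPV4/UDP pattern with a
 * queue fate action. Queue index 0 is a hypothetical placeholder.
 */
static int __rte_unused
flow_dv_example_validate(struct rte_eth_dev *dev,
                         struct rte_flow_error *error)
{
        static const struct rte_flow_attr attr = { .ingress = 1 };
        static const struct rte_flow_item items[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
                { .type = RTE_FLOW_ITEM_TYPE_UDP },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        static const struct rte_flow_action_queue queue = { .index = 0 };
        static const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };

        return flow_dv_validate(dev, &attr, items, actions, error);
}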
2141
2142 /**
2143  * Internal preparation function. Allocates the DV flow structure,
2144  * whose size is constant.
2145  *
2146  * @param[in] attr
2147  *   Pointer to the flow attributes.
2148  * @param[in] items
2149  *   Pointer to the list of items.
2150  * @param[in] actions
2151  *   Pointer to the list of actions.
2152  * @param[out] error
2153  *   Pointer to the error structure.
2154  *
2155  * @return
2156  *   Pointer to mlx5_flow object on success,
2157  *   otherwise NULL and rte_errno is set.
2158  */
2159 static struct mlx5_flow *
2160 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
2161                 const struct rte_flow_item items[] __rte_unused,
2162                 const struct rte_flow_action actions[] __rte_unused,
2163                 struct rte_flow_error *error)
2164 {
2165         uint32_t size = sizeof(struct mlx5_flow);
2166         struct mlx5_flow *flow;
2167
2168         flow = rte_calloc(__func__, 1, size, 0);
2169         if (!flow) {
2170                 rte_flow_error_set(error, ENOMEM,
2171                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2172                                    "not enough memory to create flow");
2173                 return NULL;
2174         }
2175         flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
2176         return flow;
2177 }
2178
2179 #ifndef NDEBUG
2180 /**
2181  * Sanity check for match mask and value. Similar to check_valid_spec() in
2182  * kernel driver. If the value has a bit set outside the mask, it fails.
2183  *
2184  * @param match_mask
2185  *   pointer to match mask buffer.
2186  * @param match_value
2187  *   pointer to match value buffer.
2188  *
2189  * @return
2190  *   0 if valid, -EINVAL otherwise.
2191  */
2192 static int
2193 flow_dv_check_valid_spec(void *match_mask, void *match_value)
2194 {
2195         uint8_t *m = match_mask;
2196         uint8_t *v = match_value;
2197         unsigned int i;
2198
2199         for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
2200                 if (v[i] & ~m[i]) {
2201                         DRV_LOG(ERR,
2202                                 "match_value differs from match_criteria"
2203                                 " %p[%u] != %p[%u]",
2204                                 match_value, i, match_mask, i);
2205                         return -EINVAL;
2206                 }
2207         }
2208         return 0;
2209 }
2210 #endif
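
/*
 * Illustrative sketch, not part of the driver: the invariant enforced
 * above is simply that the value contributes no bits outside the mask,
 * i.e. for every byte pair (v, m) of the two buffers:
 */
static int __rte_unused
flow_dv_example_byte_in_mask(uint8_t v, uint8_t m)
{
        return (v & ~m) == 0;
}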
2211
2212 /**
2213  * Add Ethernet item to matcher and to the value.
2214  *
2215  * @param[in, out] matcher
2216  *   Flow matcher.
2217  * @param[in, out] key
2218  *   Flow matcher value.
2219  * @param[in] item
2220  *   Flow pattern to translate.
2221  * @param[in] inner
2222  *   Item is inner pattern.
2223  */
2224 static void
2225 flow_dv_translate_item_eth(void *matcher, void *key,
2226                            const struct rte_flow_item *item, int inner)
2227 {
2228         const struct rte_flow_item_eth *eth_m = item->mask;
2229         const struct rte_flow_item_eth *eth_v = item->spec;
2230         const struct rte_flow_item_eth nic_mask = {
2231                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2232                 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
2233                 .type = RTE_BE16(0xffff),
2234         };
2235         void *headers_m;
2236         void *headers_v;
2237         char *l24_v;
2238         unsigned int i;
2239
2240         if (!eth_v)
2241                 return;
2242         if (!eth_m)
2243                 eth_m = &nic_mask;
2244         if (inner) {
2245                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2246                                          inner_headers);
2247                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2248         } else {
2249                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2250                                          outer_headers);
2251                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2252         }
2253         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
2254                &eth_m->dst, sizeof(eth_m->dst));
2255         /* The value must be in the range of the mask. */
2256         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
2257         for (i = 0; i < sizeof(eth_m->dst); ++i)
2258                 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
2259         memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
2260                &eth_m->src, sizeof(eth_m->src));
2261         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
2262         /* The value must be in the range of the mask. */
2263         for (i = 0; i < sizeof(eth_m->src); ++i)
2264                 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
2265         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
2266                  rte_be_to_cpu_16(eth_m->type));
2267         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
2268         *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
2269 }
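
/*
 * Illustrative sketch, not part of the driver: every translator below
 * follows the convention seen here - the mask is written verbatim while
 * the value is pre-ANDed with the mask, so no value bit escapes the
 * mask. Hypothetical single-field illustration:
 */
static void __rte_unused
flow_dv_example_masked_write(uint32_t *m_out, uint32_t *v_out,
                             uint32_t spec, uint32_t mask)
{
        *m_out = mask;
        *v_out = spec & mask; /* The value stays in range of the mask. */
}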
2270
2271 /**
2272  * Add VLAN item to matcher and to the value.
2273  *
2274  * @param[in, out] matcher
2275  *   Flow matcher.
2276  * @param[in, out] key
2277  *   Flow matcher value.
2278  * @param[in] item
2279  *   Flow pattern to translate.
2280  * @param[in] inner
2281  *   Item is inner pattern.
2282  */
2283 static void
2284 flow_dv_translate_item_vlan(void *matcher, void *key,
2285                             const struct rte_flow_item *item,
2286                             int inner)
2287 {
2288         const struct rte_flow_item_vlan *vlan_m = item->mask;
2289         const struct rte_flow_item_vlan *vlan_v = item->spec;
2290         const struct rte_flow_item_vlan nic_mask = {
2291                 .tci = RTE_BE16(0x0fff),
2292                 .inner_type = RTE_BE16(0xffff),
2293         };
2294         void *headers_m;
2295         void *headers_v;
2296         uint16_t tci_m;
2297         uint16_t tci_v;
2298
2299         if (!vlan_v)
2300                 return;
2301         if (!vlan_m)
2302                 vlan_m = &nic_mask;
2303         if (inner) {
2304                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2305                                          inner_headers);
2306                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2307         } else {
2308                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2309                                          outer_headers);
2310                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2311         }
2312         tci_m = rte_be_to_cpu_16(vlan_m->tci);
2313         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
2314         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
2315         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
2316         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
2317         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
2318         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
2319         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
2320         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
2321         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
2322 }
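
/*
 * Illustrative sketch, not part of the driver: the shifts above follow
 * the 802.1Q TCI layout - VID in bits 0-11, CFI/DEI in bit 12, PCP in
 * bits 13-15. Hypothetical helpers spelling that out:
 */
static uint16_t __rte_unused
flow_dv_example_tci_vid(uint16_t tci)
{
        return tci & 0x0fff;
}

static uint16_t __rte_unused
flow_dv_example_tci_cfi(uint16_t tci)
{
        return (tci >> 12) & 0x1;
}

static uint16_t __rte_unused
flow_dv_example_tci_prio(uint16_t tci)
{
        return tci >> 13;
}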
2323
2324 /**
2325  * Add IPV4 item to matcher and to the value.
2326  *
2327  * @param[in, out] matcher
2328  *   Flow matcher.
2329  * @param[in, out] key
2330  *   Flow matcher value.
2331  * @param[in] item
2332  *   Flow pattern to translate.
2333  * @param[in] inner
2334  *   Item is inner pattern.
2335  * @param[in] group
2336  *   The group to insert the rule.
2337  */
2338 static void
2339 flow_dv_translate_item_ipv4(void *matcher, void *key,
2340                             const struct rte_flow_item *item,
2341                             int inner, uint32_t group)
2342 {
2343         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
2344         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
2345         const struct rte_flow_item_ipv4 nic_mask = {
2346                 .hdr = {
2347                         .src_addr = RTE_BE32(0xffffffff),
2348                         .dst_addr = RTE_BE32(0xffffffff),
2349                         .type_of_service = 0xff,
2350                         .next_proto_id = 0xff,
2351                 },
2352         };
2353         void *headers_m;
2354         void *headers_v;
2355         char *l24_m;
2356         char *l24_v;
2357         uint8_t tos;
2358
2359         if (inner) {
2360                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2361                                          inner_headers);
2362                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2363         } else {
2364                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2365                                          outer_headers);
2366                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2367         }
2368         if (group == 0)
2369                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2370         else
2371                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x4);
2372         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
2373         if (!ipv4_v)
2374                 return;
2375         if (!ipv4_m)
2376                 ipv4_m = &nic_mask;
2377         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2378                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2379         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2380                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
2381         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
2382         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
2383         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2384                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
2385         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2386                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
2387         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
2388         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
2389         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
2390         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
2391                  ipv4_m->hdr.type_of_service);
2392         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
2393         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
2394                  ipv4_m->hdr.type_of_service >> 2);
2395         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
2396         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2397                  ipv4_m->hdr.next_proto_id);
2398         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2399                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
2400 }
2401
2402 /**
2403  * Add IPV6 item to matcher and to the value.
2404  *
2405  * @param[in, out] matcher
2406  *   Flow matcher.
2407  * @param[in, out] key
2408  *   Flow matcher value.
2409  * @param[in] item
2410  *   Flow pattern to translate.
2411  * @param[in] inner
2412  *   Item is inner pattern.
2413  * @param[in] group
2414  *   The group to insert the rule.
2415  */
2416 static void
2417 flow_dv_translate_item_ipv6(void *matcher, void *key,
2418                             const struct rte_flow_item *item,
2419                             int inner, uint32_t group)
2420 {
2421         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
2422         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
2423         const struct rte_flow_item_ipv6 nic_mask = {
2424                 .hdr = {
2425                         .src_addr =
2426                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2427                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2428                         .dst_addr =
2429                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
2430                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
2431                         .vtc_flow = RTE_BE32(0xffffffff),
2432                         .proto = 0xff,
2433                         .hop_limits = 0xff,
2434                 },
2435         };
2436         void *headers_m;
2437         void *headers_v;
2438         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2439         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2440         char *l24_m;
2441         char *l24_v;
2442         uint32_t vtc_m;
2443         uint32_t vtc_v;
2444         int i;
2445         int size;
2446
2447         if (inner) {
2448                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2449                                          inner_headers);
2450                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2451         } else {
2452                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2453                                          outer_headers);
2454                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2455         }
2456         if (group == 0)
2457                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
2458         else
2459                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0x6);
2460         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
2461         if (!ipv6_v)
2462                 return;
2463         if (!ipv6_m)
2464                 ipv6_m = &nic_mask;
2465         size = sizeof(ipv6_m->hdr.dst_addr);
2466         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2467                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2468         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2469                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
2470         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
2471         for (i = 0; i < size; ++i)
2472                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
2473         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
2474                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2475         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
2476                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
2477         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
2478         for (i = 0; i < size; ++i)
2479                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
2480         /* TOS. */
2481         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
2482         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
2483         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
2484         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
2485         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
2486         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
2487         /* Label. */
2488         if (inner) {
2489                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
2490                          vtc_m);
2491                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
2492                          vtc_v);
2493         } else {
2494                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
2495                          vtc_m);
2496                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
2497                          vtc_v);
2498         }
2499         /* Protocol. */
2500         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
2501                  ipv6_m->hdr.proto);
2502         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2503                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
2504 }
2505
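/*
 * Illustrative sketch, compiled out (MLX5_FLOW_DV_EXAMPLES is never
 * defined; the guard and helper name below are ours, not part of the
 * driver): how the CPU-endian vtc_flow word used above decomposes. The
 * first IPv6 word is version:4 | DSCP:6 | ECN:2 | flow label:20, and
 * MLX5_SET() masks its argument down to the destination field width,
 * which is why the plain right shifts above land DSCP and ECN correctly.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static void
flow_dv_example_ipv6_vtc(uint32_t vtc)
{
        uint32_t label = vtc & 0xfffff;     /* Bits 19:0. */
        uint32_t ecn = (vtc >> 20) & 0x3;   /* Bits 21:20. */
        uint32_t dscp = (vtc >> 22) & 0x3f; /* Bits 27:22. */

        (void)label;
        (void)ecn;
        (void)dscp;
}
#endif
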
2506 /**
2507  * Add TCP item to matcher and to the value.
2508  *
2509  * @param[in, out] matcher
2510  *   Flow matcher.
2511  * @param[in, out] key
2512  *   Flow matcher value.
2513  * @param[in] item
2514  *   Flow pattern to translate.
2515  * @param[in] inner
2516  *   Item is inner pattern.
2517  */
2518 static void
2519 flow_dv_translate_item_tcp(void *matcher, void *key,
2520                            const struct rte_flow_item *item,
2521                            int inner)
2522 {
2523         const struct rte_flow_item_tcp *tcp_m = item->mask;
2524         const struct rte_flow_item_tcp *tcp_v = item->spec;
2525         void *headers_m;
2526         void *headers_v;
2527
2528         if (inner) {
2529                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2530                                          inner_headers);
2531                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2532         } else {
2533                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2534                                          outer_headers);
2535                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2536         }
2537         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2538         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
2539         if (!tcp_v)
2540                 return;
2541         if (!tcp_m)
2542                 tcp_m = &rte_flow_item_tcp_mask;
2543         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
2544                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
2545         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
2546                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
2547         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
2548                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
2549         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
2550                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
2551 }
2552
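/*
 * Illustrative sketch, compiled out (MLX5_FLOW_DV_EXAMPLES is never
 * defined; all names and values below are ours, chosen for the example):
 * a caller-side TCP item the translator above would consume. A NULL mask
 * falls back to rte_flow_item_tcp_mask exactly as handled above, and the
 * spec is always ANDed with the mask before it reaches the matcher value.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_tcp flow_dv_example_tcp_spec = {
        .hdr = { .dst_port = RTE_BE16(80) },
};
static const struct rte_flow_item_tcp flow_dv_example_tcp_mask = {
        .hdr = { .dst_port = RTE_BE16(0xffff) },
};
static const struct rte_flow_item flow_dv_example_tcp_item = {
        .type = RTE_FLOW_ITEM_TYPE_TCP,
        .spec = &flow_dv_example_tcp_spec,
        .mask = &flow_dv_example_tcp_mask,
};
#endif
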
2553 /**
2554  * Add UDP item to matcher and to the value.
2555  *
2556  * @param[in, out] matcher
2557  *   Flow matcher.
2558  * @param[in, out] key
2559  *   Flow matcher value.
2560  * @param[in] item
2561  *   Flow pattern to translate.
2562  * @param[in] inner
2563  *   Item is inner pattern.
2564  */
2565 static void
2566 flow_dv_translate_item_udp(void *matcher, void *key,
2567                            const struct rte_flow_item *item,
2568                            int inner)
2569 {
2570         const struct rte_flow_item_udp *udp_m = item->mask;
2571         const struct rte_flow_item_udp *udp_v = item->spec;
2572         void *headers_m;
2573         void *headers_v;
2574
2575         if (inner) {
2576                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2577                                          inner_headers);
2578                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2579         } else {
2580                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2581                                          outer_headers);
2582                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2583         }
2584         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2585         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
2586         if (!udp_v)
2587                 return;
2588         if (!udp_m)
2589                 udp_m = &rte_flow_item_udp_mask;
2590         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
2591                  rte_be_to_cpu_16(udp_m->hdr.src_port));
2592         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
2593                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
2594         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
2595                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
2596         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2597                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
2598 }
2599
2600 /**
2601  * Add GRE item to matcher and to the value.
2602  *
2603  * @param[in, out] matcher
2604  *   Flow matcher.
2605  * @param[in, out] key
2606  *   Flow matcher value.
2607  * @param[in] item
2608  *   Flow pattern to translate.
2609  * @param[in] inner
2610  *   Item is inner pattern.
2611  */
2612 static void
2613 flow_dv_translate_item_gre(void *matcher, void *key,
2614                            const struct rte_flow_item *item,
2615                            int inner)
2616 {
2617         const struct rte_flow_item_gre *gre_m = item->mask;
2618         const struct rte_flow_item_gre *gre_v = item->spec;
2619         void *headers_m;
2620         void *headers_v;
2621         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2622         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2623
2624         if (inner) {
2625                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2626                                          inner_headers);
2627                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2628         } else {
2629                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2630                                          outer_headers);
2631                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2632         }
2633         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2634         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
2635         if (!gre_v)
2636                 return;
2637         if (!gre_m)
2638                 gre_m = &rte_flow_item_gre_mask;
2639         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
2640                  rte_be_to_cpu_16(gre_m->protocol));
2641         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2642                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
2643 }
2644
2645 /**
2646  * Add NVGRE item to matcher and to the value.
2647  *
2648  * @param[in, out] matcher
2649  *   Flow matcher.
2650  * @param[in, out] key
2651  *   Flow matcher value.
2652  * @param[in] item
2653  *   Flow pattern to translate.
2654  * @param[in] inner
2655  *   Item is inner pattern.
2656  */
2657 static void
2658 flow_dv_translate_item_nvgre(void *matcher, void *key,
2659                              const struct rte_flow_item *item,
2660                              int inner)
2661 {
2662         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
2663         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
2664         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2665         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2666         char *gre_key_m;
2667         char *gre_key_v;
2668         int size;
2669         int i;
2670
2671         flow_dv_translate_item_gre(matcher, key, item, inner);
2672         if (!nvgre_v)
2673                 return;
2674         if (!nvgre_m)
2675                 nvgre_m = &rte_flow_item_nvgre_mask;
2676         /* TNI (3 bytes) plus flow_id (1 byte) fill gre_key_h exactly. */
2677         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
2678         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
2679         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
2680         memcpy(gre_key_m, nvgre_m->tni, size);
2681         for (i = 0; i < size; ++i)
2682                 gre_key_v[i] = gre_key_m[i] &
2683                                ((const char *)nvgre_v->tni)[i];
2684 }
2685
2686 /**
2687  * Add VXLAN item to matcher and to the value.
2688  *
2689  * @param[in, out] matcher
2690  *   Flow matcher.
2691  * @param[in, out] key
2692  *   Flow matcher value.
2693  * @param[in] item
2694  *   Flow pattern to translate.
2695  * @param[in] inner
2696  *   Item is inner pattern.
2697  */
2698 static void
2699 flow_dv_translate_item_vxlan(void *matcher, void *key,
2700                              const struct rte_flow_item *item,
2701                              int inner)
2702 {
2703         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
2704         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
2705         void *headers_m;
2706         void *headers_v;
2707         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2708         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2709         char *vni_m;
2710         char *vni_v;
2711         uint16_t dport;
2712         int size;
2713         int i;
2714
2715         if (inner) {
2716                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2717                                          inner_headers);
2718                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
2719         } else {
2720                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
2721                                          outer_headers);
2722                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2723         }
2724         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
2725                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
2726         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
2727                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
2728                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
2729         }
2730         if (!vxlan_v)
2731                 return;
2732         if (!vxlan_m)
2733                 vxlan_m = &rte_flow_item_vxlan_mask;
2734         size = sizeof(vxlan_m->vni);
2735         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
2736         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
2737         memcpy(vni_m, vxlan_m->vni, size);
2738         for (i = 0; i < size; ++i)
2739                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
2740 }
2741
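/*
 * Illustrative sketch, compiled out (MLX5_FLOW_DV_EXAMPLES is never
 * defined; names and the VNI value are ours, chosen for the example):
 * a caller-side VXLAN spec/mask pair, VNI in network byte order. When
 * the preceding UDP item leaves the destination port unset, the
 * translator above pins it to the well-known VXLAN(-GPE) UDP port.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_item_vxlan flow_dv_example_vxlan_spec = {
        .vni = "\x00\x00\x64", /* VNI 100. */
};
static const struct rte_flow_item_vxlan flow_dv_example_vxlan_mask = {
        .vni = "\xff\xff\xff",
};
#endif
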
2742 /**
2743  * Add MPLS item to matcher and to the value.
2744  *
2745  * @param[in, out] matcher
2746  *   Flow matcher.
2747  * @param[in, out] key
2748  *   Flow matcher value.
2749  * @param[in] item
2750  *   Flow pattern to translate.
2751  * @param[in] prev_layer
2752  *   The protocol layer indicated in previous item.
2753  * @param[in] inner
2754  *   Item is inner pattern.
2755  */
2756 static void
2757 flow_dv_translate_item_mpls(void *matcher, void *key,
2758                             const struct rte_flow_item *item,
2759                             uint64_t prev_layer,
2760                             int inner)
2761 {
2762         const uint32_t *in_mpls_m = item->mask;
2763         const uint32_t *in_mpls_v = item->spec;
2764         uint32_t *out_mpls_m = NULL;
2765         uint32_t *out_mpls_v = NULL;
2766         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
2767         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
2768         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
2769                                      misc_parameters_2);
2770         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2771         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
2772         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
2773
2774         switch (prev_layer) {
2775         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2776                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
2777                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
2778                          MLX5_UDP_PORT_MPLS);
2779                 break;
2780         case MLX5_FLOW_LAYER_GRE:
2781                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
2782                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
2783                          ETHER_TYPE_MPLS);
2784                 break;
2785         default:
2786                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
2787                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
2788                          IPPROTO_MPLS);
2789                 break;
2790         }
2791         if (!in_mpls_v)
2792                 return;
2793         if (!in_mpls_m)
2794                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
2795         switch (prev_layer) {
2796         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
2797                 out_mpls_m =
2798                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2799                                                  outer_first_mpls_over_udp);
2800                 out_mpls_v =
2801                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2802                                                  outer_first_mpls_over_udp);
2803                 break;
2804         case MLX5_FLOW_LAYER_GRE:
2805                 out_mpls_m =
2806                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
2807                                                  outer_first_mpls_over_gre);
2808                 out_mpls_v =
2809                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
2810                                                  outer_first_mpls_over_gre);
2811                 break;
2812         default:
2813                 /* Inner MPLS not over GRE is not supported. */
2814                 if (!inner) {
2815                         out_mpls_m =
2816                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2817                                                          misc2_m,
2818                                                          outer_first_mpls);
2819                         out_mpls_v =
2820                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
2821                                                          misc2_v,
2822                                                          outer_first_mpls);
2823                 }
2824                 break;
2825         }
2826         if (out_mpls_m && out_mpls_v) {
2827                 *out_mpls_m = *in_mpls_m;
2828                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
2829         }
2830 }
2831
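/*
 * Note on the prev_layer dispatch above: the device keeps separate match
 * locations for MPLS-over-UDP, MPLS-over-GRE and the first outer MPLS, so
 * the same 32-bit label stack entry (label:20, exp:3, s:1, ttl:8, kept in
 * network byte order and copied unswapped) lands in a different misc2
 * field depending on the item that preceded it in the pattern.
 */
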
2832 /**
2833  * Add META item to matcher and to the value.
2834  *
2835  * @param[in, out] matcher
2836  *   Flow matcher.
2837  * @param[in, out] key
2838  *   Flow matcher value.
2839  * @param[in] item
2840  *   Flow pattern to translate.
2843  */
2844 static void
2845 flow_dv_translate_item_meta(void *matcher, void *key,
2846                             const struct rte_flow_item *item)
2847 {
2848         const struct rte_flow_item_meta *meta_m;
2849         const struct rte_flow_item_meta *meta_v;
2850         void *misc2_m =
2851                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
2852         void *misc2_v =
2853                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
2854
2855         meta_m = (const void *)item->mask;
2856         if (!meta_m)
2857                 meta_m = &rte_flow_item_meta_mask;
2858         meta_v = (const void *)item->spec;
2859         if (meta_v) {
2860                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
2861                          rte_be_to_cpu_32(meta_m->data));
2862                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
2863                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
2864         }
2865 }
2866
2867 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
2868
2869 #define HEADER_IS_ZERO(match_criteria, headers)                              \
2870         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
2871                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
2872
2873 /**
2874  * Calculate flow matcher enable bitmap.
2875  *
2876  * @param match_criteria
2877  *   Pointer to flow matcher criteria.
2878  *
2879  * @return
2880  *   Bitmap of enabled fields.
2881  */
2882 static uint8_t
2883 flow_dv_matcher_enable(uint32_t *match_criteria)
2884 {
2885         uint8_t match_criteria_enable;
2886
2887         match_criteria_enable =
2888                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
2889                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
2890         match_criteria_enable |=
2891                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
2892                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
2893         match_criteria_enable |=
2894                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
2895                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
2896         match_criteria_enable |=
2897                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
2898                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
2899 #ifdef HAVE_MLX5DV_DR
2900         match_criteria_enable |=
2901                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_3)) <<
2902                 MLX5_MATCH_CRITERIA_ENABLE_MISC3_BIT;
2903 #endif
2904         return match_criteria_enable;
2905 }
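
/*
 * Note: the bitmap computed above feeds
 * mlx5dv_flow_matcher_attr.match_criteria_enable when the matcher is
 * created below; a mask touching only outer L2-L4 headers plus, say,
 * the VXLAN VNI yields just the OUTER and MISC bits.
 */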
2906
2908 /**
2909  * Get a flow table.
2910  *
2911  * @param[in, out] dev
2912  *   Pointer to rte_eth_dev structure.
2913  * @param[in] table_id
2914  *   Table id to use.
2915  * @param[in] egress
2916  *   Direction of the table.
2917  * @param[out] error
2918  *   Pointer to error structure.
2919  *
2920  * @return
2921  *   The table resource for the given table id, NULL in case of failure.
2922  */
2923 static struct mlx5_flow_tbl_resource *
2924 flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
2925                          uint32_t table_id, uint8_t egress,
2926                          struct rte_flow_error *error)
2927 {
2928         struct mlx5_priv *priv = dev->data->dev_private;
2929         struct mlx5_ibv_shared *sh = priv->sh;
2930         struct mlx5_flow_tbl_resource *tbl;
2931
2932 #ifdef HAVE_MLX5DV_DR
2933         if (egress) {
2934                 tbl = &sh->tx_tbl[table_id];
2935                 if (!tbl->obj)
2936                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2937                                 (sh->tx_ns, table_id);
2938         } else {
2939                 tbl = &sh->rx_tbl[table_id];
2940                 if (!tbl->obj)
2941                         tbl->obj = mlx5_glue->dr_create_flow_tbl
2942                                 (sh->rx_ns, table_id);
2943         }
2944         if (!tbl->obj) {
2945                 rte_flow_error_set(error, ENOMEM,
2946                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2947                                    NULL, "cannot create table");
2948                 return NULL;
2949         }
2950         rte_atomic32_inc(&tbl->refcnt);
2951         return tbl;
2952 #else
2953         (void)error;
2954         (void)tbl;
2955         if (egress)
2956                 return &sh->tx_tbl[table_id];
2957         else
2958                 return &sh->rx_tbl[table_id];
2959 #endif
2960 }
2961
2962 /**
2963  * Release a flow table.
2964  *
2965  * @param[in] tbl
2966  *   Table resource to be released.
2967  *
2968  * @return
2969  *   0 if the table was released, 1 otherwise.
2970  */
2971 static int
2972 flow_dv_tbl_resource_release(struct mlx5_flow_tbl_resource *tbl)
2973 {
2974         if (!tbl)
2975                 return 0;
2976         if (rte_atomic32_dec_and_test(&tbl->refcnt)) {
2977                 mlx5_glue->dr_destroy_flow_tbl(tbl->obj);
2978                 tbl->obj = NULL;
2979                 return 0;
2980         }
2981         return 1;
2982 }
2983
2984 /**
2985  * Register the flow matcher.
2986  *
2987  * @param[in, out] dev
2988  *   Pointer to rte_eth_dev structure.
2989  * @param[in, out] matcher
2990  *   Pointer to flow matcher.
2991  * @param[in, out] dev_flow
2992  *   Pointer to the dev_flow.
2993  * @param[out] error
2994  *   Pointer to error structure.
2995  *
2996  * @return
2997  *   0 on success, a negative errno value otherwise and rte_errno is set.
2998  */
2999 static int
3000 flow_dv_matcher_register(struct rte_eth_dev *dev,
3001                          struct mlx5_flow_dv_matcher *matcher,
3002                          struct mlx5_flow *dev_flow,
3003                          struct rte_flow_error *error)
3004 {
3005         struct mlx5_priv *priv = dev->data->dev_private;
3006         struct mlx5_ibv_shared *sh = priv->sh;
3007         struct mlx5_flow_dv_matcher *cache_matcher;
3008         struct mlx5dv_flow_matcher_attr dv_attr = {
3009                 .type = IBV_FLOW_ATTR_NORMAL,
3010                 .match_mask = (void *)&matcher->mask,
3011         };
3012         struct mlx5_flow_tbl_resource *tbl = NULL;
3013
3014         /* Lookup from cache. */
3015         LIST_FOREACH(cache_matcher, &sh->matchers, next) {
3016                 if (matcher->crc == cache_matcher->crc &&
3017                     matcher->priority == cache_matcher->priority &&
3018                     matcher->egress == cache_matcher->egress &&
3019                     matcher->group == cache_matcher->group &&
3020                     !memcmp((const void *)matcher->mask.buf,
3021                             (const void *)cache_matcher->mask.buf,
3022                             cache_matcher->mask.size)) {
3023                         DRV_LOG(DEBUG,
3024                                 "priority %hd use %s matcher %p: refcnt %d++",
3025                                 cache_matcher->priority,
3026                                 cache_matcher->egress ? "tx" : "rx",
3027                                 (void *)cache_matcher,
3028                                 rte_atomic32_read(&cache_matcher->refcnt));
3029                         rte_atomic32_inc(&cache_matcher->refcnt);
3030                         dev_flow->dv.matcher = cache_matcher;
3031                         return 0;
3032                 }
3033         }
3034         /* Register new matcher. */
3035         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
3036         if (!cache_matcher)
3037                 return rte_flow_error_set(error, ENOMEM,
3038                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3039                                           "cannot allocate matcher memory");
3040         tbl = flow_dv_tbl_resource_get(dev, matcher->group * MLX5_GROUP_FACTOR,
3041                                        matcher->egress, error);
3042         if (!tbl) {
3043                 rte_free(cache_matcher);
3044                 return rte_flow_error_set(error, ENOMEM,
3045                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3046                                           NULL, "cannot create table");
3047         }
3048         *cache_matcher = *matcher;
3049         dv_attr.match_criteria_enable =
3050                 flow_dv_matcher_enable(cache_matcher->mask.buf);
3051         dv_attr.priority = matcher->priority;
3052         if (matcher->egress)
3053                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
3054         cache_matcher->matcher_object =
3055                 mlx5_glue->dv_create_flow_matcher(sh->ctx, &dv_attr, tbl->obj);
3056         if (!cache_matcher->matcher_object) {
3057                 rte_free(cache_matcher);
3058 #ifdef HAVE_MLX5DV_DR
3059                 flow_dv_tbl_resource_release(tbl);
3060 #endif
3061                 return rte_flow_error_set(error, ENOMEM,
3062                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3063                                           NULL, "cannot create matcher");
3064         }
3065         rte_atomic32_inc(&cache_matcher->refcnt);
3066         LIST_INSERT_HEAD(&sh->matchers, cache_matcher, next);
3067         dev_flow->dv.matcher = cache_matcher;
3068         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
3069                 cache_matcher->priority,
3070                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
3071                 rte_atomic32_read(&cache_matcher->refcnt));
3072         rte_atomic32_inc(&tbl->refcnt);
3073         return 0;
3074 }
3075
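/*
 * Note: matchers are shared across flows - the lookup above keys on the
 * (crc, priority, egress, group) tuple and confirms with a full mask
 * memcmp, the CRC acting only as a cheap first-pass filter.
 */
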
3076 /**
3077  * Add source vport match to the specified matcher.
3078  *
3079  * @param[in, out] matcher
3080  *   Flow matcher.
3081  * @param[in, out] key
3082  *   Flow matcher value.
3083  * @param[in] port
3084  *   Source vport value to match.
3085  * @param[in] mask
3086  *   Mask to apply on the source vport value.
3087  */
3088 static void
3089 flow_dv_translate_source_vport(void *matcher, void *key,
3090                               int16_t port, uint16_t mask)
3091 {
3092         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
3093         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
3094
3095         MLX5_SET(fte_match_set_misc, misc_m, source_port, mask);
3096         MLX5_SET(fte_match_set_misc, misc_v, source_port, port);
3097 }
3098
3099 /**
3100  * Find existing tag resource or create and register a new one.
3101  *
3102  * @param[in, out] dev
3103  *   Pointer to rte_eth_dev structure.
3104  * @param[in, out] resource
3105  *   Pointer to tag resource.
3106  * @param[in, out] dev_flow
3107  *   Pointer to the dev_flow.
3108  * @param[out] error
3109  *   Pointer to error structure.
3110  *
3111  * @return
3112  *   0 on success, a negative errno value otherwise and rte_errno is set.
3113  */
3114 static int
3115 flow_dv_tag_resource_register
3116                         (struct rte_eth_dev *dev,
3117                          struct mlx5_flow_dv_tag_resource *resource,
3118                          struct mlx5_flow *dev_flow,
3119                          struct rte_flow_error *error)
3120 {
3121         struct mlx5_priv *priv = dev->data->dev_private;
3122         struct mlx5_ibv_shared *sh = priv->sh;
3123         struct mlx5_flow_dv_tag_resource *cache_resource;
3124
3125         /* Lookup a matching resource from cache. */
3126         LIST_FOREACH(cache_resource, &sh->tags, next) {
3127                 if (resource->tag == cache_resource->tag) {
3128                         DRV_LOG(DEBUG, "tag resource %p: refcnt %d++",
3129                                 (void *)cache_resource,
3130                                 rte_atomic32_read(&cache_resource->refcnt));
3131                         rte_atomic32_inc(&cache_resource->refcnt);
3132                         dev_flow->flow->tag_resource = cache_resource;
3133                         return 0;
3134                 }
3135         }
3136         /* Register a new resource. */
3137         cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
3138         if (!cache_resource)
3139                 return rte_flow_error_set(error, ENOMEM,
3140                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3141                                           "cannot allocate resource memory");
3142         *cache_resource = *resource;
3143         cache_resource->action = mlx5_glue->dv_create_flow_action_tag
3144                 (resource->tag);
3145         if (!cache_resource->action) {
3146                 rte_free(cache_resource);
3147                 return rte_flow_error_set(error, ENOMEM,
3148                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3149                                           NULL, "cannot create action");
3150         }
3151         rte_atomic32_init(&cache_resource->refcnt);
3152         rte_atomic32_inc(&cache_resource->refcnt);
3153         LIST_INSERT_HEAD(&sh->tags, cache_resource, next);
3154         dev_flow->flow->tag_resource = cache_resource;
3155         DRV_LOG(DEBUG, "new tag resource %p: refcnt %d++",
3156                 (void *)cache_resource,
3157                 rte_atomic32_read(&cache_resource->refcnt));
3158         return 0;
3159 }
3160
3161 /**
3162  * Release the tag.
3163  *
3164  * @param dev
3165  *   Pointer to Ethernet device.
3166  * @param tag
3167  *   Pointer to the tag resource.
3168  *
3169  * @return
3170  *   1 while a reference on it exists, 0 when freed.
3171  */
3172 static int
3173 flow_dv_tag_release(struct rte_eth_dev *dev,
3174                     struct mlx5_flow_dv_tag_resource *tag)
3175 {
3176         assert(tag);
3177         DRV_LOG(DEBUG, "port %u tag %p: refcnt %d--",
3178                 dev->data->port_id, (void *)tag,
3179                 rte_atomic32_read(&tag->refcnt));
3180         if (rte_atomic32_dec_and_test(&tag->refcnt)) {
3181                 claim_zero(mlx5_glue->destroy_flow_action(tag->action));
3182                 LIST_REMOVE(tag, next);
3183                 DRV_LOG(DEBUG, "port %u tag %p: removed",
3184                         dev->data->port_id, (void *)tag);
3185                 rte_free(tag);
3186                 return 0;
3187         }
3188         return 1;
3189 }
3190
3191 /**
3192  * Fill the flow with DV spec.
3193  *
3194  * @param[in] dev
3195  *   Pointer to rte_eth_dev structure.
3196  * @param[in, out] dev_flow
3197  *   Pointer to the sub flow.
3198  * @param[in] attr
3199  *   Pointer to the flow attributes.
3200  * @param[in] items
3201  *   Pointer to the list of items.
3202  * @param[in] actions
3203  *   Pointer to the list of actions.
3204  * @param[out] error
3205  *   Pointer to the error structure.
3206  *
3207  * @return
3208  *   0 on success, a negative errno value otherwise and rte_errno is set.
3209  */
3210 static int
3211 flow_dv_translate(struct rte_eth_dev *dev,
3212                   struct mlx5_flow *dev_flow,
3213                   const struct rte_flow_attr *attr,
3214                   const struct rte_flow_item items[],
3215                   const struct rte_flow_action actions[],
3216                   struct rte_flow_error *error)
3217 {
3218         struct mlx5_priv *priv = dev->data->dev_private;
3219         struct rte_flow *flow = dev_flow->flow;
3220         uint64_t item_flags = 0;
3221         uint64_t last_item = 0;
3222         uint64_t action_flags = 0;
3223         uint64_t priority = attr->priority;
3224         struct mlx5_flow_dv_matcher matcher = {
3225                 .mask = {
3226                         .size = sizeof(matcher.mask.buf),
3227                 },
3228         };
3229         int actions_n = 0;
3230         bool actions_end = false;
3231         struct mlx5_flow_dv_modify_hdr_resource res = {
3232                 .ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
3233                                           MLX5DV_FLOW_TABLE_TYPE_NIC_RX
3234         };
3235         union flow_dv_attr flow_attr = { .attr = 0 };
3236         struct mlx5_flow_dv_tag_resource tag_resource;
3237         uint32_t modify_action_position = UINT32_MAX;
3238
3239         if (priority == MLX5_FLOW_PRIO_RSVD)
3240                 priority = priv->config.flow_prio - 1;
3241         for (; !actions_end ; actions++) {
3242                 const struct rte_flow_action_queue *queue;
3243                 const struct rte_flow_action_rss *rss;
3244                 const struct rte_flow_action *action = actions;
3245                 const struct rte_flow_action_count *count = action->conf;
3246                 const uint8_t *rss_key;
3247                 const struct rte_flow_action_jump *jump_data;
3248                 struct mlx5_flow_dv_jump_tbl_resource jump_tbl_resource;
3249                 struct mlx5_flow_tbl_resource *tbl;
3250
3251                 switch (actions->type) {
3252                 case RTE_FLOW_ACTION_TYPE_VOID:
3253                         break;
3254                 case RTE_FLOW_ACTION_TYPE_FLAG:
3255                         tag_resource.tag =
3256                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
3257                         if (!flow->tag_resource)
3258                                 if (flow_dv_tag_resource_register
3259                                     (dev, &tag_resource, dev_flow, error))
3260                                         return -rte_errno;
3261                         dev_flow->dv.actions[actions_n++] =
3262                                 flow->tag_resource->action;
3263                         action_flags |= MLX5_FLOW_ACTION_FLAG;
3264                         break;
3265                 case RTE_FLOW_ACTION_TYPE_MARK:
3266                         tag_resource.tag = mlx5_flow_mark_set
3267                               (((const struct rte_flow_action_mark *)
3268                                (actions->conf))->id);
3269                         if (!flow->tag_resource)
3270                                 if (flow_dv_tag_resource_register
3271                                     (dev, &tag_resource, dev_flow, error))
3272                                         return -rte_errno;
3273                         dev_flow->dv.actions[actions_n++] =
3274                                 flow->tag_resource->action;
3275                         action_flags |= MLX5_FLOW_ACTION_MARK;
3276                         break;
3277                 case RTE_FLOW_ACTION_TYPE_DROP:
3278                         action_flags |= MLX5_FLOW_ACTION_DROP;
3279                         break;
3280                 case RTE_FLOW_ACTION_TYPE_QUEUE:
3281                         queue = actions->conf;
3282                         flow->rss.queue_num = 1;
3283                         (*flow->queue)[0] = queue->index;
3284                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
3285                         break;
3286                 case RTE_FLOW_ACTION_TYPE_RSS:
3287                         rss = actions->conf;
3288                         if (flow->queue)
3289                                 memcpy((*flow->queue), rss->queue,
3290                                        rss->queue_num * sizeof(uint16_t));
3291                         flow->rss.queue_num = rss->queue_num;
3292                         /* NULL RSS key indicates default RSS key. */
3293                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
3294                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
3295                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
3296                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
3297                         flow->rss.level = rss->level;
3298                         action_flags |= MLX5_FLOW_ACTION_RSS;
3299                         break;
3300                 case RTE_FLOW_ACTION_TYPE_COUNT:
3301                         if (!priv->config.devx) {
3302                                 rte_errno = ENOTSUP;
3303                                 goto cnt_err;
3304                         }
3305                         flow->counter = flow_dv_counter_new(dev, count->shared,
3306                                                             count->id);
3307                         if (flow->counter == NULL)
3308                                 goto cnt_err;
3309                         dev_flow->dv.actions[actions_n++] =
3310                                 flow->counter->action;
3311                         action_flags |= MLX5_FLOW_ACTION_COUNT;
3312                         break;
3313 cnt_err:
3314                         if (rte_errno == ENOTSUP)
3315                                 return rte_flow_error_set
3316                                               (error, ENOTSUP,
3317                                                RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3318                                                NULL,
3319                                                "count action not supported");
3320                         else
3321                                 return rte_flow_error_set
3322                                                 (error, rte_errno,
3323                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3324                                                  action,
3325                                                  "cannot create counter"
3326                                                   " object.");
3327                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
3328                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
3329                         if (flow_dv_create_action_l2_encap(dev, actions,
3330                                                            dev_flow, error))
3331                                 return -rte_errno;
3332                         dev_flow->dv.actions[actions_n++] =
3333                                 dev_flow->dv.encap_decap->verbs_action;
3334                         action_flags |= actions->type ==
3335                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
3336                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
3337                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
3338                         break;
3339                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
3340                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
3341                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
3342                                                            error))
3343                                 return -rte_errno;
3344                         dev_flow->dv.actions[actions_n++] =
3345                                 dev_flow->dv.encap_decap->verbs_action;
3346                         action_flags |= actions->type ==
3347                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
3348                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
3349                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
3350                         break;
3351                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
3352                         /* Handle encap with preceding decap. */
3353                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
3354                                 if (flow_dv_create_action_raw_encap
3355                                         (dev, actions, dev_flow, attr, error))
3356                                         return -rte_errno;
3357                                 dev_flow->dv.actions[actions_n++] =
3358                                         dev_flow->dv.encap_decap->verbs_action;
3359                         } else {
3360                                 /* Handle encap without preceding decap. */
3361                                 if (flow_dv_create_action_l2_encap(dev, actions,
3362                                                                    dev_flow,
3363                                                                    error))
3364                                         return -rte_errno;
3365                                 dev_flow->dv.actions[actions_n++] =
3366                                         dev_flow->dv.encap_decap->verbs_action;
3367                         }
3368                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
3369                         break;
3370                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
3371                         /* Check if this decap is followed by encap. */
3372                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
3373                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
3374                                action++) {
3375                         }
3376                         /* Handle decap only if it isn't followed by encap. */
3377                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
3378                                 if (flow_dv_create_action_l2_decap(dev,
3379                                                                    dev_flow,
3380                                                                    error))
3381                                         return -rte_errno;
3382                                 dev_flow->dv.actions[actions_n++] =
3383                                         dev_flow->dv.encap_decap->verbs_action;
3384                         }
3385                         /* If decap is followed by encap, handle it at encap. */
3386                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
3387                         break;
3388                 case RTE_FLOW_ACTION_TYPE_JUMP:
3389                         jump_data = action->conf;
3390                         tbl = flow_dv_tbl_resource_get(dev, jump_data->group *
3391                                                        MLX5_GROUP_FACTOR,
3392                                                        attr->egress, error);
3393                         if (!tbl)
3394                                 return rte_flow_error_set
3395                                                 (error, errno,
3396                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3397                                                  NULL,
3398                                                  "cannot create jump action.");
3399                         jump_tbl_resource.tbl = tbl;
3400                         if (flow_dv_jump_tbl_resource_register
3401                             (dev, &jump_tbl_resource, dev_flow, error)) {
3402                                 flow_dv_tbl_resource_release(tbl);
3403                                 return rte_flow_error_set
3404                                                 (error, errno,
3405                                                  RTE_FLOW_ERROR_TYPE_ACTION,
3406                                                  NULL,
3407                                                  "cannot create jump action.");
3408                         }
3409                         dev_flow->dv.actions[actions_n++] =
3410                                 dev_flow->dv.jump->action;
3411                         action_flags |= MLX5_FLOW_ACTION_JUMP;
3412                         break;
3413                 case RTE_FLOW_ACTION_TYPE_SET_MAC_SRC:
3414                 case RTE_FLOW_ACTION_TYPE_SET_MAC_DST:
3415                         if (flow_dv_convert_action_modify_mac(&res, actions,
3416                                                               error))
3417                                 return -rte_errno;
3418                         action_flags |= actions->type ==
3419                                         RTE_FLOW_ACTION_TYPE_SET_MAC_SRC ?
3420                                         MLX5_FLOW_ACTION_SET_MAC_SRC :
3421                                         MLX5_FLOW_ACTION_SET_MAC_DST;
3422                         break;
3423                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC:
3424                 case RTE_FLOW_ACTION_TYPE_SET_IPV4_DST:
3425                         if (flow_dv_convert_action_modify_ipv4(&res, actions,
3426                                                                error))
3427                                 return -rte_errno;
3428                         action_flags |= actions->type ==
3429                                         RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC ?
3430                                         MLX5_FLOW_ACTION_SET_IPV4_SRC :
3431                                         MLX5_FLOW_ACTION_SET_IPV4_DST;
3432                         break;
3433                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC:
3434                 case RTE_FLOW_ACTION_TYPE_SET_IPV6_DST:
3435                         if (flow_dv_convert_action_modify_ipv6(&res, actions,
3436                                                                error))
3437                                 return -rte_errno;
3438                         action_flags |= actions->type ==
3439                                         RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC ?
3440                                         MLX5_FLOW_ACTION_SET_IPV6_SRC :
3441                                         MLX5_FLOW_ACTION_SET_IPV6_DST;
3442                         break;
3443                 case RTE_FLOW_ACTION_TYPE_SET_TP_SRC:
3444                 case RTE_FLOW_ACTION_TYPE_SET_TP_DST:
3445                         if (flow_dv_convert_action_modify_tp(&res, actions,
3446                                                              items, &flow_attr,
3447                                                              error))
3448                                 return -rte_errno;
3449                         action_flags |= actions->type ==
3450                                         RTE_FLOW_ACTION_TYPE_SET_TP_SRC ?
3451                                         MLX5_FLOW_ACTION_SET_TP_SRC :
3452                                         MLX5_FLOW_ACTION_SET_TP_DST;
3453                         break;
3454                 case RTE_FLOW_ACTION_TYPE_DEC_TTL:
3455                         if (flow_dv_convert_action_modify_dec_ttl(&res, items,
3456                                                                   &flow_attr,
3457                                                                   error))
3458                                 return -rte_errno;
3459                         action_flags |= MLX5_FLOW_ACTION_DEC_TTL;
3460                         break;
3461                 case RTE_FLOW_ACTION_TYPE_SET_TTL:
3462                         if (flow_dv_convert_action_modify_ttl(&res, actions,
3463                                                              items, &flow_attr,
3464                                                              error))
3465                                 return -rte_errno;
3466                         action_flags |= MLX5_FLOW_ACTION_SET_TTL;
3467                         break;
3468                 case RTE_FLOW_ACTION_TYPE_END:
3469                         actions_end = true;
3470                         if (action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) {
3471                                 /* create modify action if needed. */
3472                                 if (flow_dv_modify_hdr_resource_register
3473                                                                 (dev, &res,
3474                                                                  dev_flow,
3475                                                                  error))
3476                                         return -rte_errno;
3477                                 dev_flow->dv.actions[modify_action_position] =
3478                                         dev_flow->dv.modify_hdr->verbs_action;
3479                         }
3480                         break;
3481                 default:
3482                         break;
3483                 }
3484                 if ((action_flags & MLX5_FLOW_MODIFY_HDR_ACTIONS) &&
3485                     modify_action_position == UINT32_MAX)
3486                         modify_action_position = actions_n++;
3487         }
3488         dev_flow->dv.actions_n = actions_n;
3489         flow->actions = action_flags;
3490         if (attr->ingress && !attr->transfer &&
3491             (priv->representor || priv->master)) {
3492                 /* It was validated - we support unidirectional flows only. */
3493                 assert(!attr->egress);
3494                 /*
3495                  * Add matching on source vport index only
3496                  * for ingress rules in E-Switch configurations.
3497                  */
3498                 flow_dv_translate_source_vport(matcher.mask.buf,
3499                                                dev_flow->dv.value.buf,
3500                                                priv->vport_id,
3501                                                0xffff);
3502         }
3503         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
3504                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
3505                 void *match_mask = matcher.mask.buf;
3506                 void *match_value = dev_flow->dv.value.buf;
3507
3508                 switch (items->type) {
3509                 case RTE_FLOW_ITEM_TYPE_ETH:
3510                         flow_dv_translate_item_eth(match_mask, match_value,
3511                                                    items, tunnel);
3512                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3513                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3514                                              MLX5_FLOW_LAYER_OUTER_L2;
3515                         break;
3516                 case RTE_FLOW_ITEM_TYPE_VLAN:
3517                         flow_dv_translate_item_vlan(match_mask, match_value,
3518                                                     items, tunnel);
3519                         matcher.priority = MLX5_PRIORITY_MAP_L2;
3520                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
3521                                               MLX5_FLOW_LAYER_INNER_VLAN) :
3522                                              (MLX5_FLOW_LAYER_OUTER_L2 |
3523                                               MLX5_FLOW_LAYER_OUTER_VLAN);
3524                         break;
3525                 case RTE_FLOW_ITEM_TYPE_IPV4:
3526                         flow_dv_translate_item_ipv4(match_mask, match_value,
3527                                                     items, tunnel, attr->group);
3528                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3529                         dev_flow->dv.hash_fields |=
3530                                 mlx5_flow_hashfields_adjust
3531                                         (dev_flow, tunnel,
3532                                          MLX5_IPV4_LAYER_TYPES,
3533                                          MLX5_IPV4_IBV_RX_HASH);
3534                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3535                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3536                         break;
3537                 case RTE_FLOW_ITEM_TYPE_IPV6:
3538                         flow_dv_translate_item_ipv6(match_mask, match_value,
3539                                                     items, tunnel, attr->group);
3540                         matcher.priority = MLX5_PRIORITY_MAP_L3;
3541                         dev_flow->dv.hash_fields |=
3542                                 mlx5_flow_hashfields_adjust
3543                                         (dev_flow, tunnel,
3544                                          MLX5_IPV6_LAYER_TYPES,
3545                                          MLX5_IPV6_IBV_RX_HASH);
3546                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3547                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3548                         break;
3549                 case RTE_FLOW_ITEM_TYPE_TCP:
3550                         flow_dv_translate_item_tcp(match_mask, match_value,
3551                                                    items, tunnel);
3552                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3553                         dev_flow->dv.hash_fields |=
3554                                 mlx5_flow_hashfields_adjust
3555                                         (dev_flow, tunnel, ETH_RSS_TCP,
3556                                          IBV_RX_HASH_SRC_PORT_TCP |
3557                                          IBV_RX_HASH_DST_PORT_TCP);
3558                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3559                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
3560                         break;
3561                 case RTE_FLOW_ITEM_TYPE_UDP:
3562                         flow_dv_translate_item_udp(match_mask, match_value,
3563                                                    items, tunnel);
3564                         matcher.priority = MLX5_PRIORITY_MAP_L4;
3565                         dev_flow->dv.hash_fields |=
3566                                 mlx5_flow_hashfields_adjust
3567                                         (dev_flow, tunnel, ETH_RSS_UDP,
3568                                          IBV_RX_HASH_SRC_PORT_UDP |
3569                                          IBV_RX_HASH_DST_PORT_UDP);
3570                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3571                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
3572                         break;
3573                 case RTE_FLOW_ITEM_TYPE_GRE:
3574                         flow_dv_translate_item_gre(match_mask, match_value,
3575                                                    items, tunnel);
3576                         last_item = MLX5_FLOW_LAYER_GRE;
3577                         break;
3578                 case RTE_FLOW_ITEM_TYPE_NVGRE:
3579                         flow_dv_translate_item_nvgre(match_mask, match_value,
3580                                                      items, tunnel);
3581                         last_item = MLX5_FLOW_LAYER_GRE;
3582                         break;
3583                 case RTE_FLOW_ITEM_TYPE_VXLAN:
3584                         flow_dv_translate_item_vxlan(match_mask, match_value,
3585                                                      items, tunnel);
3586                         last_item = MLX5_FLOW_LAYER_VXLAN;
3587                         break;
3588                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3589                         flow_dv_translate_item_vxlan(match_mask, match_value,
3590                                                      items, tunnel);
3591                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
3592                         break;
3593                 case RTE_FLOW_ITEM_TYPE_MPLS:
3594                         flow_dv_translate_item_mpls(match_mask, match_value,
3595                                                     items, last_item, tunnel);
3596                         last_item = MLX5_FLOW_LAYER_MPLS;
3597                         break;
3598                 case RTE_FLOW_ITEM_TYPE_META:
3599                         flow_dv_translate_item_meta(match_mask, match_value,
3600                                                     items);
3601                         last_item = MLX5_FLOW_ITEM_METADATA;
3602                         break;
3603                 default:
3604                         break;
3605                 }
3606                 item_flags |= last_item;
3607         }
3608         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
3609                                          dev_flow->dv.value.buf));
3610         dev_flow->layers = item_flags;
3611         /* Register matcher. */
3612         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
3613                                     matcher.mask.size);
3614         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
3615                                                      matcher.priority);
3616         matcher.egress = attr->egress;
3617         matcher.group = attr->group;
3618         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
3619                 return -rte_errno;
3620         return 0;
3621 }
3622
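/*
 * Illustrative sketch, compiled out (MLX5_FLOW_DV_EXAMPLES is never
 * defined; all names and values below are ours, chosen for the example):
 * an action list where modify-header placement matters. SET_IPV4_SRC is
 * folded into the shared modify-header resource, and the loop above
 * records its slot in modify_action_position, so the device action is
 * emitted before the QUEUE action, preserving the user-visible order.
 */
#ifdef MLX5_FLOW_DV_EXAMPLES
static const struct rte_flow_action_set_ipv4 flow_dv_example_set_src = {
        .ipv4_addr = RTE_BE32(0xc0a80001), /* 192.168.0.1. */
};
static const struct rte_flow_action_queue flow_dv_example_queue = {
        .index = 0,
};
static const struct rte_flow_action flow_dv_example_actions[] = {
        {
                .type = RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC,
                .conf = &flow_dv_example_set_src,
        },
        {
                .type = RTE_FLOW_ACTION_TYPE_QUEUE,
                .conf = &flow_dv_example_queue,
        },
        {
                .type = RTE_FLOW_ACTION_TYPE_END,
        },
};
#endif
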
3623 /**
3624  * Apply the flow to the NIC.
3625  *
3626  * @param[in] dev
3627  *   Pointer to the Ethernet device structure.
3628  * @param[in, out] flow
3629  *   Pointer to flow structure.
3630  * @param[out] error
3631  *   Pointer to error structure.
3632  *
3633  * @return
3634  *   0 on success, a negative errno value otherwise and rte_errno is set.
3635  */
3636 static int
3637 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
3638               struct rte_flow_error *error)
3639 {
3640         struct mlx5_flow_dv *dv;
3641         struct mlx5_flow *dev_flow;
3642         int n;
3643         int err;
3644
3645         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3646                 dv = &dev_flow->dv;
3647                 n = dv->actions_n;
3648                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
3649                         dv->hrxq = mlx5_hrxq_drop_new(dev);
3650                         if (!dv->hrxq) {
3651                                 rte_flow_error_set
3652                                         (error, errno,
3653                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3654                                          "cannot get drop hash queue");
3655                                 goto error;
3656                         }
3657                         dv->actions[n++] = dv->hrxq->action;
3658                 } else if (flow->actions &
3659                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
3660                         struct mlx5_hrxq *hrxq;
3661
3662                         hrxq = mlx5_hrxq_get(dev, flow->key,
3663                                              MLX5_RSS_HASH_KEY_LEN,
3664                                              dv->hash_fields,
3665                                              (*flow->queue),
3666                                              flow->rss.queue_num);
3667                         if (!hrxq)
3668                                 hrxq = mlx5_hrxq_new
3669                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
3670                                          dv->hash_fields, (*flow->queue),
3671                                          flow->rss.queue_num,
3672                                          !!(dev_flow->layers &
3673                                             MLX5_FLOW_LAYER_TUNNEL));
3674                         if (!hrxq) {
3675                                 rte_flow_error_set
3676                                         (error, rte_errno,
3677                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
3678                                          "cannot get hash queue");
3679                                 goto error;
3680                         }
3681                         dv->hrxq = hrxq;
3682                         dv->actions[n++] = dv->hrxq->action;
3683                 }
3684                 dv->flow =
3685                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
3686                                                   (void *)&dv->value, n,
3687                                                   dv->actions);
3688                 if (!dv->flow) {
3689                         rte_flow_error_set(error, errno,
3690                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
3691                                            NULL,
3692                                            "hardware refuses to create flow");
3693                         goto error;
3694                 }
3695         }
3696         return 0;
3697 error:
3698         err = rte_errno; /* Save rte_errno before cleanup. */
3699         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
3700                 struct mlx5_flow_dv *dv = &dev_flow->dv;
3701                 if (dv->hrxq) {
3702                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
3703                                 mlx5_hrxq_drop_release(dev);
3704                         else
3705                                 mlx5_hrxq_release(dev, dv->hrxq);
3706                         dv->hrxq = NULL;
3707                 }
3708         }
3709         rte_errno = err; /* Restore rte_errno. */
3710         return -rte_errno;
3711 }
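
/*
 * Editorial usage sketch (application-level, not driver code): a
 * minimal drop rule whose apply path takes the MLX5_FLOW_ACTION_DROP
 * branch above. The port_id parameter is an assumption for
 * illustration.
 */
static struct rte_flow *
example_create_drop_flow(uint16_t port_id)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        const struct rte_flow_item pattern[] = {
                { .type = RTE_FLOW_ITEM_TYPE_ETH },
                { .type = RTE_FLOW_ITEM_TYPE_END },
        };
        const struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_DROP },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        /* On success the PMD ends up in flow_dv_apply() above. */
        return rte_flow_create(port_id, &attr, pattern, actions, &error);
}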

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
                        struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
        struct mlx5_priv *priv = dev->data->dev_private;
        struct mlx5_ibv_shared *sh = priv->sh;
        struct mlx5_flow_tbl_resource *tbl;

        assert(matcher->matcher_object);
        DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
                dev->data->port_id, (void *)matcher,
                rte_atomic32_read(&matcher->refcnt));
        if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
                claim_zero(mlx5_glue->dv_destroy_flow_matcher
                           (matcher->matcher_object));
                LIST_REMOVE(matcher, next);
                if (matcher->egress)
                        tbl = &sh->tx_tbl[matcher->group];
                else
                        tbl = &sh->rx_tbl[matcher->group];
                flow_dv_tbl_resource_release(tbl);
                rte_free(matcher);
                DRV_LOG(DEBUG, "port %u matcher %p: removed",
                        dev->data->port_id, (void *)matcher);
                return 0;
        }
        return 1;
}
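
/*
 * Editorial note: this and the three *_release() helpers that follow
 * share one reference-counting idiom, shown here in generic form.
 * "example_resource" and its fields are hypothetical; only the
 * rte_atomic32_dec_and_test()/LIST_REMOVE()/rte_free() sequence
 * mirrors the real helpers.
 */
struct example_resource {
        LIST_ENTRY(example_resource) next; /* Cache list linkage. */
        rte_atomic32_t refcnt; /* One count per referencing flow. */
        void *hw_object; /* Verbs/DV object to destroy on last put. */
};

static int
example_resource_release(struct example_resource *res)
{
        if (rte_atomic32_dec_and_test(&res->refcnt)) {
                /* Last reference: destroy HW object, unlink, free. */
                claim_zero(mlx5_glue->destroy_flow_action(res->hw_object));
                LIST_REMOVE(res, next);
                rte_free(res);
                return 0; /* Freed. */
        }
        return 1; /* Still referenced elsewhere. */
}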

/**
 * Release an encap/decap resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_encap_decap_resource *cache_resource =
                                                flow->dv.encap_decap;

        assert(cache_resource->verbs_action);
        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
                claim_zero(mlx5_glue->destroy_flow_action
                                (cache_resource->verbs_action));
                LIST_REMOVE(cache_resource, next);
                rte_free(cache_resource);
                DRV_LOG(DEBUG, "encap/decap resource %p: removed",
                        (void *)cache_resource);
                return 0;
        }
        return 1;
}

/**
 * Release a jump-to-table action resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_jump_tbl_resource_release(struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_jump_tbl_resource *cache_resource =
                                                flow->dv.jump;

        assert(cache_resource->action);
        DRV_LOG(DEBUG, "jump table resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
                claim_zero(mlx5_glue->destroy_flow_action
                                (cache_resource->action));
                LIST_REMOVE(cache_resource, next);
                flow_dv_tbl_resource_release(cache_resource->tbl);
                rte_free(cache_resource);
                DRV_LOG(DEBUG, "jump table resource %p: removed",
                        (void *)cache_resource);
                return 0;
        }
        return 1;
}

/**
 * Release a modify-header resource.
 *
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_modify_hdr_resource_release(struct mlx5_flow *flow)
{
        struct mlx5_flow_dv_modify_hdr_resource *cache_resource =
                                                flow->dv.modify_hdr;

        assert(cache_resource->verbs_action);
        DRV_LOG(DEBUG, "modify-header resource %p: refcnt %d--",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
                claim_zero(mlx5_glue->destroy_flow_action
                                (cache_resource->verbs_action));
                LIST_REMOVE(cache_resource, next);
                rte_free(cache_resource);
                DRV_LOG(DEBUG, "modify-header resource %p: removed",
                        (void *)cache_resource);
                return 0;
        }
        return 1;
}

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow_dv *dv;
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
                dv = &dev_flow->dv;
                if (dv->flow) {
                        claim_zero(mlx5_glue->dv_destroy_flow(dv->flow));
                        dv->flow = NULL;
                }
                if (dv->hrxq) {
                        if (flow->actions & MLX5_FLOW_ACTION_DROP)
                                mlx5_hrxq_drop_release(dev);
                        else
                                mlx5_hrxq_release(dev, dv->hrxq);
                        dv->hrxq = NULL;
                }
        }
}

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        struct mlx5_flow *dev_flow;

        if (!flow)
                return;
        flow_dv_remove(dev, flow);
        if (flow->counter) {
                flow_dv_counter_release(flow->counter);
                flow->counter = NULL;
        }
        if (flow->tag_resource) {
                flow_dv_tag_release(dev, flow->tag_resource);
                flow->tag_resource = NULL;
        }
        while (!LIST_EMPTY(&flow->dev_flows)) {
                dev_flow = LIST_FIRST(&flow->dev_flows);
                LIST_REMOVE(dev_flow, next);
                if (dev_flow->dv.matcher)
                        flow_dv_matcher_release(dev, dev_flow);
                if (dev_flow->dv.encap_decap)
                        flow_dv_encap_decap_resource_release(dev_flow);
                if (dev_flow->dv.modify_hdr)
                        flow_dv_modify_hdr_resource_release(dev_flow);
                if (dev_flow->dv.jump)
                        flow_dv_jump_tbl_resource_release(dev_flow);
                rte_free(dev_flow);
        }
}
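
/*
 * Editorial sketch: the remove/destroy split above lets the generic
 * layer strip rules from HW on device stop while keeping the SW flow
 * state cached for re-apply on start (an assumption drawn from the
 * two doc comments above; the wrapper below is hypothetical).
 */
static void
example_port_stop_flow(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        /* HW rule and queues released; matchers and resources kept. */
        flow_dv_remove(dev, flow);
        /* A later flow_dv_apply(dev, flow, ...) re-offloads the rule. */
}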

/**
 * Query a DV flow rule for its statistics via DevX.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in] flow
 *   Pointer to the sub flow.
 * @param[out] data
 *   Data retrieved by the query.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_query_count(struct rte_eth_dev *dev, struct rte_flow *flow,
                    void *data, struct rte_flow_error *error)
{
        struct mlx5_priv *priv = dev->data->dev_private;
        struct rte_flow_query_count *qc = data;
        uint64_t pkts = 0;
        uint64_t bytes = 0;
        int err;

        if (!priv->config.devx)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL,
                                          "counters are not supported");
        if (flow->counter) {
                err = mlx5_devx_cmd_flow_counter_query
                                                (flow->counter->dcs,
                                                 qc->reset, &pkts, &bytes);
                if (err)
                        return rte_flow_error_set
                                (error, err,
                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                 NULL,
                                 "cannot read counters");
                qc->hits_set = 1;
                qc->bytes_set = 1;
                /* Report deltas against the last reset point. */
                qc->hits = pkts - flow->counter->hits;
                qc->bytes = bytes - flow->counter->bytes;
                if (qc->reset) {
                        flow->counter->hits = pkts;
                        flow->counter->bytes = bytes;
                }
                return 0;
        }
        return rte_flow_error_set(error, EINVAL,
                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                  NULL,
                                  "counters are not available");
}
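
/*
 * Editorial usage sketch (application-level): reading the counter
 * handled by flow_dv_query_count() through the generic rte_flow API.
 * port_id and the flow handle are assumptions for illustration.
 */
static int
example_read_flow_counter(uint16_t port_id, struct rte_flow *flow)
{
        struct rte_flow_query_count count = { .reset = 1 };
        const struct rte_flow_action action[] = {
                { .type = RTE_FLOW_ACTION_TYPE_COUNT },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_error error;

        if (rte_flow_query(port_id, flow, action, &count, &error))
                return -rte_errno;
        /* count.hits/count.bytes hold deltas since the last reset. */
        return 0;
}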

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev,
              struct rte_flow *flow,
              const struct rte_flow_action *actions,
              void *data,
              struct rte_flow_error *error)
{
        int ret = -EINVAL;

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = flow_dv_query_count(dev, flow, data, error);
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        return ret;
}
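
/*
 * Editorial note: the flow_d_* wrappers below serialize entry into the
 * DV translate/apply/remove/destroy paths. The lock is presumably
 * needed because matchers, tables and action resources live in the
 * mlx5_ibv_shared context common to all ports of one IB device (an
 * assumption based on flow_dv_matcher_release() above; the exact
 * scope of flow_d_shared_lock() is defined elsewhere in this driver).
 */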

/*
 * Mutex-protected thunk to flow_dv_translate().
 */
static int
flow_d_translate(struct rte_eth_dev *dev,
                 struct mlx5_flow *dev_flow,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;

        flow_d_shared_lock(dev);
        ret = flow_dv_translate(dev, dev_flow, attr, items, actions, error);
        flow_d_shared_unlock(dev);
        return ret;
}

/*
 * Mutex-protected thunk to flow_dv_apply().
 */
static int
flow_d_apply(struct rte_eth_dev *dev,
             struct rte_flow *flow,
             struct rte_flow_error *error)
{
        int ret;

        flow_d_shared_lock(dev);
        ret = flow_dv_apply(dev, flow, error);
        flow_d_shared_unlock(dev);
        return ret;
}

/*
 * Mutex-protected thunk to flow_dv_remove().
 */
static void
flow_d_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        flow_d_shared_lock(dev);
        flow_dv_remove(dev, flow);
        flow_d_shared_unlock(dev);
}

/*
 * Mutex-protected thunk to flow_dv_destroy().
 */
static void
flow_d_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
        flow_d_shared_lock(dev);
        flow_dv_destroy(dev, flow);
        flow_d_shared_unlock(dev);
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
        .validate = flow_dv_validate,
        .prepare = flow_dv_prepare,
        .translate = flow_d_translate,
        .apply = flow_d_apply,
        .remove = flow_d_remove,
        .destroy = flow_d_destroy,
        .query = flow_dv_query,
};
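
/*
 * Editorial note: this ops table is consumed by the generic flow layer
 * (mlx5_flow.c), which is assumed to select the DV engine when the
 * dv_flow_en devarg enables it; otherwise the Verbs-based ops are used.
 */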

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */